From 4b0b97c2b646c726139515d8679cd1779dbf1688 Mon Sep 17 00:00:00 2001
From: Tianon Gravi
Date: Fri, 19 Dec 2014 04:54:12 +0000
Subject: [PATCH] Import docker.io_1.3.3~dfsg1.orig.tar.gz

[dgit import orig docker.io_1.3.3~dfsg1.orig.tar.gz]
---
 .dockerignore | 2 +
 .drone.yml | 14 +
 .gitignore | 29 +
 .mailmap | 99 +
 .travis.yml | 39 +
 AUTHORS | 597 +++
 CHANGELOG.md | 1563 ++++++++
 CONTRIBUTING.md | 316 ++
 Dockerfile | 113 +
 LICENSE | 191 +
 MAINTAINERS | 9 +
 Makefile | 68 +
 NOTICE | 19 +
 README.md | 205 +
 VERSION | 1 +
 api/MAINTAINERS | 1 +
 api/README.md | 5 +
 api/api_unit_test.go | 19 +
 api/client/cli.go | 148 +
 api/client/commands.go | 2548 ++++++++++++
 api/client/hijack.go | 230 ++
 api/client/utils.go | 288 ++
 api/common.go | 49 +
 api/server/MAINTAINERS | 2 +
 api/server/server.go | 1532 ++++++++
 api/server/server_unit_test.go | 555 +++
 builder/MAINTAINERS | 2 +
 builder/dispatchers.go | 353 ++
 builder/evaluator.go | 236 ++
 builder/internals.go | 689 ++++
 builder/job.go | 130 +
 builder/parser/dumper/main.go | 32 +
 builder/parser/line_parsers.go | 155 +
 builder/parser/parser.go | 140 +
 builder/parser/parser_test.go | 82 +
 .../env_equals_env/Dockerfile | 3 +
 .../Dockerfile | 2 +
 .../shykes-nested-json/Dockerfile | 1 +
 .../testfiles/brimstone-consuldock/Dockerfile | 25 +
 .../testfiles/brimstone-consuldock/result | 5 +
 .../brimstone-docker-consul/Dockerfile | 52 +
 .../testfiles/brimstone-docker-consul/result | 9 +
 .../testfiles/continueIndent/Dockerfile | 36 +
 .../parser/testfiles/continueIndent/result | 10 +
 .../testfiles/cpuguy83-nagios/Dockerfile | 54 +
 .../parser/testfiles/cpuguy83-nagios/result | 40 +
 builder/parser/testfiles/docker/Dockerfile | 105 +
 builder/parser/testfiles/docker/result | 25 +
 builder/parser/testfiles/escapes/Dockerfile | 14 +
 builder/parser/testfiles/escapes/result | 6 +
 builder/parser/testfiles/influxdb/Dockerfile | 15 +
 builder/parser/testfiles/influxdb/result | 11 +
 .../Dockerfile | 1 +
 .../result | 1 +
 .../Dockerfile | 1 +
 .../result | 1 +
 .../Dockerfile | 1 +
 .../jeztah-invalid-json-single-quotes/result | 1 +
 .../Dockerfile | 1 +
 .../result | 1 +
 .../Dockerfile | 1 +
 .../result | 1 +
 .../kartar-entrypoint-oddities/Dockerfile | 7 +
 .../kartar-entrypoint-oddities/result | 7 +
 .../lk4d4-the-edge-case-generator/Dockerfile | 48 +
 .../lk4d4-the-edge-case-generator/result | 29 +
 builder/parser/testfiles/mail/Dockerfile | 16 +
 builder/parser/testfiles/mail/result | 14 +
 .../testfiles/multiple-volumes/Dockerfile | 3 +
 .../parser/testfiles/multiple-volumes/result | 2 +
 builder/parser/testfiles/mumble/Dockerfile | 7 +
 builder/parser/testfiles/mumble/result | 4 +
 builder/parser/testfiles/nginx/Dockerfile | 14 +
 builder/parser/testfiles/nginx/result | 11 +
 builder/parser/testfiles/tf2/Dockerfile | 23 +
 builder/parser/testfiles/tf2/result | 20 +
 builder/parser/testfiles/weechat/Dockerfile | 9 +
 builder/parser/testfiles/weechat/result | 6 +
 builder/parser/testfiles/znc/Dockerfile | 7 +
 builder/parser/testfiles/znc/result | 5 +
 builder/parser/utils.go | 94 +
 builder/support.go | 59 +
 builtins/builtins.go | 75 +
 contrib/MAINTAINERS | 1 +
 contrib/README | 4 +
 contrib/check-config.sh | 171 +
 contrib/completion/bash/docker | 780 ++++
 contrib/completion/fish/docker.fish | 285 ++
 contrib/completion/zsh/_docker | 471 +++
 contrib/desktop-integration/README.md | 11 +
 .../desktop-integration/chromium/Dockerfile | 38 +
 .../desktop-integration/gparted/Dockerfile | 33 +
 contrib/docker-device-tool/device_tool.go | 170 +
 contrib/host-integration/Dockerfile.dev | 27 +
 contrib/host-integration/Dockerfile.min | 4 +
 contrib/host-integration/manager.go | 130 +
 contrib/host-integration/manager.sh | 53 +
 contrib/host-integration/manager/systemd | 20 +
 contrib/host-integration/manager/upstart | 15 +
 contrib/init/openrc/docker.confd | 13 +
 contrib/init/openrc/docker.initd | 34 +
 contrib/init/systemd/MAINTAINERS | 2 +
 contrib/init/systemd/docker.service | 13 +
 contrib/init/systemd/docker.socket | 12 +
 contrib/init/sysvinit-debian/docker | 141 +
 contrib/init/sysvinit-debian/docker.default | 13 +
 contrib/init/sysvinit-redhat/docker | 130 +
 contrib/init/sysvinit-redhat/docker.sysconfig | 7 +
 contrib/init/upstart/docker.conf | 41 +
 contrib/mkimage-alpine.sh | 82 +
 contrib/mkimage-arch-pacman.conf | 92 +
 contrib/mkimage-arch.sh | 65 +
 contrib/mkimage-busybox.sh | 43 +
 contrib/mkimage-crux.sh | 75 +
 contrib/mkimage-debootstrap.sh | 297 ++
 contrib/mkimage-rinse.sh | 123 +
 contrib/mkimage-unittest.sh | 49 +
 contrib/mkimage-yum.sh | 98 +
 contrib/mkimage.sh | 107 +
 contrib/mkimage/.febootstrap-minimize | 28 +
 contrib/mkimage/busybox-static | 34 +
 contrib/mkimage/debootstrap | 193 +
 contrib/mkimage/mageia-urpmi | 61 +
 contrib/mkimage/rinse | 25 +
 contrib/mkseccomp.pl | 77 +
 contrib/mkseccomp.sample | 444 +++
 contrib/nuke-graph-directory.sh | 64 +
 contrib/syntax/kate/Dockerfile.xml | 68 +
 .../Preferences/Dockerfile.tmPreferences | 24 +
 .../Syntaxes/Dockerfile.tmLanguage | 93 +
 .../textmate/Docker.tmbundle/info.plist | 16 +
 contrib/syntax/textmate/MAINTAINERS | 1 +
 contrib/syntax/textmate/README.md | 16 +
 contrib/syntax/vim/LICENSE | 22 +
 contrib/syntax/vim/README.md | 23 +
 contrib/syntax/vim/doc/dockerfile.txt | 18 +
 contrib/syntax/vim/ftdetect/dockerfile.vim | 1 +
 contrib/syntax/vim/syntax/dockerfile.vim | 23 +
 contrib/udev/80-docker.rules | 3 +
 contrib/vagrant-docker/README.md | 50 +
 daemon/MAINTAINERS | 6 +
 daemon/README.md | 10 +
 daemon/attach.go | 273 ++
 daemon/changes.go | 32 +
 daemon/commit.go | 84 +
 daemon/config.go | 86 +
 daemon/container.go | 1246 ++++++
 daemon/container_unit_test.go | 197 +
 daemon/copy.go | 33 +
 daemon/create.go | 101 +
 daemon/daemon.go | 1126 ++++++
 daemon/daemon_aufs.go | 22 +
 daemon/daemon_btrfs.go | 7 +
 daemon/daemon_devicemapper.go | 7 +
 daemon/daemon_no_aufs.go | 11 +
 daemon/daemon_unit_test.go | 39 +
 daemon/delete.go | 125 +
 daemon/exec.go | 301 ++
 daemon/execdriver/MAINTAINERS | 2 +
 daemon/execdriver/driver.go | 121 +
 daemon/execdriver/execdrivers/execdrivers.go | 23 +
 daemon/execdriver/lxc/MAINTAINERS | 1 +
 daemon/execdriver/lxc/driver.go | 527 +++
 daemon/execdriver/lxc/info.go | 50 +
 daemon/execdriver/lxc/info_test.go | 36 +
 daemon/execdriver/lxc/init.go | 213 +
 daemon/execdriver/lxc/lxc_init_linux.go | 78 +
 daemon/execdriver/lxc/lxc_init_unsupported.go | 13 +
 daemon/execdriver/lxc/lxc_template.go | 151 +
 .../execdriver/lxc/lxc_template_unit_test.go | 142 +
 daemon/execdriver/native/create.go | 183 +
 daemon/execdriver/native/driver.go | 311 ++
 .../execdriver/native/driver_unsupported.go | 13 +
 .../native/driver_unsupported_nocgo.go | 13 +
 daemon/execdriver/native/exec.go | 70 +
 daemon/execdriver/native/info.go | 30 +
 daemon/execdriver/native/init.go | 66 +
 .../native/template/default_template.go | 47 +
 daemon/execdriver/native/utils.go | 35 +
 daemon/execdriver/pipes.go | 23 +
 daemon/execdriver/termconsole.go | 46 +
 daemon/execdriver/utils.go | 63 +
 daemon/export.go | 30 +
 daemon/graphdriver/aufs/aufs.go | 450 +++
 daemon/graphdriver/aufs/aufs_test.go | 703 ++++
 daemon/graphdriver/aufs/dirs.go | 46 +
 daemon/graphdriver/aufs/migrate.go | 194 +
 daemon/graphdriver/aufs/mount.go | 18 +
 daemon/graphdriver/aufs/mount_linux.go | 9 +
 daemon/graphdriver/aufs/mount_unsupported.go | 11 +
 daemon/graphdriver/btrfs/MAINTAINERS | 1 +
 daemon/graphdriver/btrfs/btrfs.go | 225 ++
 daemon/graphdriver/btrfs/btrfs_test.go | 28 +
 daemon/graphdriver/btrfs/dummy_unsupported.go | 3 +
 daemon/graphdriver/devmapper/MAINTAINERS | 1 +
 daemon/graphdriver/devmapper/README.md | 156 +
 .../graphdriver/devmapper/attach_loopback.go | 129 +
 daemon/graphdriver/devmapper/deviceset.go | 1253 ++++++
 daemon/graphdriver/devmapper/devmapper.go | 646 +++
 daemon/graphdriver/devmapper/devmapper_doc.go | 106 +
 daemon/graphdriver/devmapper/devmapper_log.go | 30 +
 .../graphdriver/devmapper/devmapper_test.go | 37 +
 .../devmapper/devmapper_wrapper.go | 240 ++
 daemon/graphdriver/devmapper/driver.go | 151 +
 daemon/graphdriver/devmapper/ioctl.go | 72 +
 daemon/graphdriver/devmapper/mount.go | 86 +
 daemon/graphdriver/driver.go | 156 +
 daemon/graphdriver/fsdiff.go | 166 +
 daemon/graphdriver/graphtest/graphtest.go | 229 ++
 daemon/graphdriver/vfs/driver.go | 104 +
 daemon/graphdriver/vfs/vfs_test.go | 35 +
 daemon/history.go | 33 +
 daemon/image_delete.go | 156 +
 daemon/info.go | 74 +
 daemon/inspect.go | 67 +
 daemon/kill.go | 59 +
 daemon/list.go | 159 +
 daemon/logs.go | 135 +
 daemon/monitor.go | 309 ++
 daemon/network_settings.go | 43 +
 daemon/networkdriver/bridge/driver.go | 544 +++
 daemon/networkdriver/bridge/driver_test.go | 120 +
 daemon/networkdriver/ipallocator/allocator.go | 150 +
 .../ipallocator/allocator_test.go | 434 ++
 daemon/networkdriver/network.go | 10 +
 daemon/networkdriver/network_test.go | 190 +
 .../portallocator/portallocator.go | 156 +
 .../portallocator/portallocator_test.go | 216 +
 daemon/networkdriver/portmapper/mapper.go | 176 +
 .../networkdriver/portmapper/mapper_test.go | 152 +
 daemon/networkdriver/portmapper/mock_proxy.go | 18 +
 daemon/networkdriver/portmapper/proxy.go | 156 +
 daemon/networkdriver/utils.go | 118 +
 daemon/pause.go | 37 +
 daemon/resize.go | 53 +
 daemon/restart.go | 27 +
 daemon/start.go | 73 +
 daemon/state.go | 206 +
 daemon/state_test.go | 102 +
 daemon/stop.go | 30 +
 daemon/top.go | 79 +
 daemon/utils.go | 53 +
 daemon/utils_linux.go | 17 +
 daemon/utils_nolinux.go | 13 +
 daemon/utils_test.go | 54 +
 daemon/volumes.go | 333 ++
 daemon/wait.go | 20 +
 docker/README.md | 3 +
 docker/client.go | 13 +
 docker/daemon.go | 93 +
 docker/docker.go | 119 +
 docker/flags.go | 101 +
 dockerinit/dockerinit.go | 12 +
 dockerversion/dockerversion.go | 15 +
 docs/.gitignore | 5 +
 docs/Dockerfile | 49 +
 docs/MAINTAINERS | 4 +
 docs/README.md | 162 +
 docs/docs-update.py | 239 ++
 docs/man/Dockerfile | 7 +
 docs/man/Dockerfile.5.md | 207 +
 docs/man/README.md | 70 +
 docs/man/docker-attach.1.md | 61 +
 docs/man/docker-build.1.md | 121 +
 docs/man/docker-commit.1.md | 41 +
 docs/man/docker-cp.1.md | 28 +
 docs/man/docker-create.1.md | 140 +
 docs/man/docker-diff.1.md | 47 +
 docs/man/docker-events.1.md | 61 +
 docs/man/docker-exec.1.md | 29 +
 docs/man/docker-export.1.md | 30 +
 docs/man/docker-history.1.md | 34 +
 docs/man/docker-images.1.md | 90 +
 docs/man/docker-import.1.md | 43 +
 docs/man/docker-info.1.md | 44 +
 docs/man/docker-inspect.1.md | 229 ++
 docs/man/docker-kill.1.md | 24 +
 docs/man/docker-load.1.md | 38 +
 docs/man/docker-login.1.md | 38 +
 docs/man/docker-logout.1.md | 27 +
 docs/man/docker-logs.1.md | 38 +
 docs/man/docker-pause.1.md | 27 +
 docs/man/docker-port.1.md | 32 +
 docs/man/docker-ps.1.md | 76 +
 docs/man/docker-pull.1.md | 66 +
 docs/man/docker-push.1.md | 49 +
 docs/man/docker-restart.1.md | 22 +
 docs/man/docker-rm.1.md | 53 +
 docs/man/docker-rmi.1.md | 38 +
 docs/man/docker-run.1.md | 427 ++
 docs/man/docker-save.1.md | 37 +
 docs/man/docker-search.1.md | 58 +
 docs/man/docker-start.1.md | 27 +
 docs/man/docker-stop.1.md | 23 +
 docs/man/docker-tag.1.md | 59 +
 docs/man/docker-top.1.md | 31 +
 docs/man/docker-unpause.1.md | 24 +
 docs/man/docker-version.1.md | 15 +
 docs/man/docker-wait.1.md | 28 +
 docs/man/docker.1.md | 208 +
 docs/man/md2man-all.sh | 22 +
 docs/mkdocs.yml | 150 +
 docs/release.sh | 134 +
 docs/s3_website.json | 36 +
 engine/MAINTAINERS | 1 +
 engine/engine.go | 260 ++
 engine/engine_test.go | 162 +
 engine/env.go | 297 ++
 engine/env_test.go | 324 ++
 engine/hack.go | 21 +
 engine/helpers_test.go | 11 +
 engine/http.go | 42 +
 engine/job.go | 238 ++
 engine/job_test.go | 75 +
 engine/shutdown_test.go | 80 +
 engine/streams.go | 222 ++
 engine/streams_test.go | 210 +
 engine/table.go | 140 +
 engine/table_test.go | 112 +
 events/events.go | 176 +
 events/events_test.go | 154 +
 graph/MAINTAINERS | 5 +
 graph/export.go | 168 +
 graph/graph.go | 397 ++
 graph/history.go | 46 +
 graph/import.go | 61 +
 graph/list.go | 103 +
 graph/load.go | 134 +
 graph/pools_test.go | 49 +
 graph/pull.go | 601 +++
 graph/push.go | 250 ++
 graph/service.go | 182 +
 graph/tag.go | 44 +
 graph/tags.go | 356 ++
 graph/tags_unit_test.go | 150 +
 graph/viz.go | 38 +
 hack/CONTRIBUTORS.md | 1 +
 hack/MAINTAINERS | 4 +
 hack/MAINTAINERS.md | 130 +
 hack/PACKAGERS.md | 329 ++
 hack/PRINCIPLES.md | 19 +
 hack/README.md | 24 +
 hack/RELEASE-CHECKLIST.md | 303 ++
 hack/ROADMAP.md | 41 +
 hack/allmaintainers.sh | 3 +
 hack/dind | 88 +
 hack/generate-authors.sh | 15 +
 hack/getmaintainer.sh | 62 +
 hack/install.sh | 214 +
 hack/make.sh | 242 ++
 hack/make/.ensure-busybox | 10 +
 hack/make/.ensure-scratch | 21 +
 hack/make/.go-compile-test-dir | 26 +
 hack/make/.validate | 33 +
 hack/make/README.md | 17 +
 hack/make/binary | 17 +
 hack/make/cover | 22 +
 hack/make/cross | 33 +
 hack/make/dynbinary | 45 +
 hack/make/dyntest-integration | 18 +
 hack/make/dyntest-unit | 18 +
 hack/make/test-integration | 15 +
 hack/make/test-integration-cli | 46 +
 hack/make/test-unit | 84 +
 hack/make/tgz | 31 +
 hack/make/ubuntu | 176 +
 hack/make/validate-dco | 56 +
 hack/make/validate-gofmt | 30 +
 hack/release.sh | 389 ++
 hack/stats.sh | 22 +
 hack/vendor.sh | 71 +
 image/graph.go | 11 +
 image/image.go | 255 ++
 integration-cli/MAINTAINERS | 1 +
 .../TestCopy/DirContentToExistDir/Dockerfile | 10 +
 .../DirContentToExistDir/test_dir/test_file | 0
 .../TestCopy/DirContentToRoot/Dockerfile | 8 +
 .../DirContentToRoot/test_dir/test_file | 0
 .../TestCopy/DisallowRemote/Dockerfile | 2 +
 .../build_tests/TestCopy/EtcToRoot/Dockerfile | 2 +
 .../TestCopy/MultipleFiles/Dockerfile | 17 +
 .../TestCopy/MultipleFiles/test_file1 | 0
 .../TestCopy/MultipleFiles/test_file2 | 0
 .../TestCopy/MultipleFiles/test_file3 | 0
 .../TestCopy/MultipleFiles/test_file4 | 0
 .../TestCopy/MultipleFilesToFile/Dockerfile | 7 +
 .../TestCopy/MultipleFilesToFile/test_file1 | 0
 .../TestCopy/MultipleFilesToFile/test_file2 | 0
 .../TestCopy/MultipleFilesToFile/test_file3 | 0
 .../TestCopy/SingleFileToExistDir/Dockerfile | 10 +
 .../TestCopy/SingleFileToExistDir/test_file | 0
 .../SingleFileToNonExistDir/Dockerfile | 9 +
 .../SingleFileToNonExistDir/test_file | 0
 .../TestCopy/SingleFileToRoot/Dockerfile | 9 +
 .../TestCopy/SingleFileToWorkdir/Dockerfile | 2 +
 .../TestCopy/WholeDirToRoot/Dockerfile | 11 +
 integration-cli/docker_api_inspect_test.go | 58 +
 integration-cli/docker_cli_attach_test.go | 89 +
 integration-cli/docker_cli_build_test.go | 3499 +++++++++++++++++
 integration-cli/docker_cli_commit_test.go | 139 +
 integration-cli/docker_cli_cp_test.go | 411 ++
 integration-cli/docker_cli_create_test.go | 116 +
 integration-cli/docker_cli_daemon_test.go | 94 +
 integration-cli/docker_cli_diff_test.go | 93 +
 integration-cli/docker_cli_events_test.go | 217 +
 integration-cli/docker_cli_exec_test.go | 139 +
 .../docker_cli_export_import_test.go | 50 +
 integration-cli/docker_cli_history_test.go | 85 +
 integration-cli/docker_cli_images_test.go | 62 +
 integration-cli/docker_cli_import_test.go | 30 +
 integration-cli/docker_cli_info_test.go | 29 +
 integration-cli/docker_cli_inspect_test.go | 22 +
 integration-cli/docker_cli_kill_test.go | 64 +
 integration-cli/docker_cli_links_test.go | 156 +
 integration-cli/docker_cli_logs_test.go | 252 ++
 integration-cli/docker_cli_nat_test.go | 52 +
 integration-cli/docker_cli_port_test.go | 125 +
 integration-cli/docker_cli_ps_test.go | 362 ++
 integration-cli/docker_cli_pull_test.go | 32 +
 integration-cli/docker_cli_push_test.go | 48 +
 integration-cli/docker_cli_restart_test.go | 127 +
 integration-cli/docker_cli_rm_test.go | 128 +
 integration-cli/docker_cli_rmi_test.go | 100 +
 integration-cli/docker_cli_run_test.go | 2442 ++++++++++++
 integration-cli/docker_cli_save_load_test.go | 395 ++
 integration-cli/docker_cli_search_test.go | 25 +
 integration-cli/docker_cli_start_test.go | 67 +
 integration-cli/docker_cli_tag_test.go | 90 +
 integration-cli/docker_cli_top_test.go | 91 +
 integration-cli/docker_cli_version_test.go | 38 +
 integration-cli/docker_test_vars.go | 47 +
 integration-cli/docker_utils.go | 714 ++++
 integration-cli/utils.go | 268 ++
 integration/MAINTAINERS | 2 +
 integration/README.md | 23 +
 integration/api_test.go | 1180 ++++++
 integration/commands_test.go | 564 +++
 integration/container_test.go | 259 ++
 integration/fixtures/https/ca.pem | 23 +
 integration/fixtures/https/client-cert.pem | 73 +
 integration/fixtures/https/client-key.pem | 16 +
 .../fixtures/https/client-rogue-cert.pem | 73 +
 .../fixtures/https/client-rogue-key.pem | 16 +
 integration/fixtures/https/server-cert.pem | 76 +
 integration/fixtures/https/server-key.pem | 16 +
 .../fixtures/https/server-rogue-cert.pem | 76 +
 .../fixtures/https/server-rogue-key.pem | 16 +
 integration/graph_test.go | 318 ++
 integration/https_test.go | 97 +
 integration/runtime_test.go | 899 +++++
 integration/server_test.go | 295 ++
 integration/utils_test.go | 360 ++
 integration/z_final_test.go | 17 +
 links/links.go | 137 +
 links/links_test.go | 109 +
 nat/nat.go | 159 +
 nat/nat_test.go | 201 +
 nat/sort.go | 28 +
 nat/sort_test.go | 41 +
 opts/envfile.go | 54 +
 opts/ip.go | 31 +
 opts/opts.go | 229 ++
 opts/opts_test.go | 90 +
 pkg/README.md | 11 +
 pkg/archive/MAINTAINERS | 2 +
 pkg/archive/README.md | 1 +
 pkg/archive/archive.go | 754 ++++
 pkg/archive/archive_test.go | 448 +++
 pkg/archive/changes.go | 411 ++
 pkg/archive/changes_test.go | 301 ++
 pkg/archive/diff.go | 167 +
 pkg/archive/diff_test.go | 191 +
 pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes
 pkg/archive/time_linux.go | 16 +
 pkg/archive/time_unsupported.go | 16 +
 pkg/archive/utils_test.go | 166 +
 pkg/archive/wrap.go | 59 +
 pkg/broadcastwriter/broadcastwriter.go | 101 +
 pkg/broadcastwriter/broadcastwriter_test.go | 144 +
 pkg/chrootarchive/archive.go | 111 +
 pkg/chrootarchive/archive_test.go | 101 +
 pkg/chrootarchive/diff.go | 60 +
 pkg/chrootarchive/init.go | 26 +
 pkg/fileutils/fileutils.go | 26 +
 pkg/graphdb/MAINTAINERS | 1 +
 pkg/graphdb/conn_sqlite3.go | 34 +
 pkg/graphdb/conn_unsupported.go | 7 +
 pkg/graphdb/graphdb.go | 528 +++
 pkg/graphdb/graphdb_test.go | 619 +++
 pkg/graphdb/sort.go | 27 +
 pkg/graphdb/sort_test.go | 29 +
 pkg/graphdb/utils.go | 32 +
 pkg/httputils/MAINTAINERS | 1 +
 pkg/httputils/resumablerequestreader.go | 93 +
 pkg/ioutils/readers.go | 114 +
 pkg/ioutils/readers_test.go | 34 +
 pkg/ioutils/writers.go | 39 +
 pkg/iptables/MAINTAINERS | 1 +
 pkg/iptables/iptables.go | 193 +
 pkg/jsonlog/jsonlog.go | 53 +
 pkg/jsonlog/jsonlog_marshalling.go | 176 +
 pkg/jsonlog/jsonlog_test.go | 61 +
 pkg/listenbuffer/buffer.go | 46 +
 pkg/log/log.go | 83 +
 pkg/log/log_test.go | 37 +
 pkg/mflag/LICENSE | 27 +
 pkg/mflag/MAINTAINERS | 1 +
 pkg/mflag/README.md | 40 +
 pkg/mflag/example/example.go | 36 +
 pkg/mflag/flag.go | 1003 +++++
 pkg/mflag/flag_test.go | 506 +++
 pkg/mount/MAINTAINERS | 1 +
 pkg/mount/flags.go | 62 +
 pkg/mount/flags_freebsd.go | 28 +
 pkg/mount/flags_linux.go | 23 +
 pkg/mount/flags_unsupported.go | 22 +
 pkg/mount/mount.go | 70 +
 pkg/mount/mount_test.go | 137 +
 pkg/mount/mounter_freebsd.go | 59 +
 pkg/mount/mounter_linux.go | 21 +
 pkg/mount/mounter_unsupported.go | 11 +
 pkg/mount/mountinfo.go | 7 +
 pkg/mount/mountinfo_freebsd.go | 38 +
 pkg/mount/mountinfo_linux.go | 74 +
 pkg/mount/mountinfo_linux_test.go | 448 +++
 pkg/mount/mountinfo_unsupported.go | 12 +
 pkg/namesgenerator/names-generator.go | 92 +
 pkg/namesgenerator/names-generator_test.go | 23 +
 pkg/networkfs/MAINTAINERS | 1 +
 pkg/networkfs/etchosts/etchosts.go | 53 +
 pkg/networkfs/etchosts/etchosts_test.go | 108 +
 pkg/networkfs/resolvconf/resolvconf.go | 92 +
 pkg/networkfs/resolvconf/resolvconf_test.go | 158 +
 pkg/parsers/MAINTAINERS | 1 +
 pkg/parsers/filters/parse.go | 63 +
 pkg/parsers/filters/parse_test.go | 78 +
 pkg/parsers/kernel/kernel.go | 93 +
 pkg/parsers/kernel/kernel_test.go | 61 +
 pkg/parsers/kernel/uname_linux.go | 16 +
 pkg/parsers/kernel/uname_unsupported.go | 15 +
 .../operatingsystem/operatingsystem.go | 40 +
 .../operatingsystem/operatingsystem_test.go | 123 +
 pkg/parsers/parsers.go | 110 +
 pkg/parsers/parsers_test.go | 83 +
 pkg/pools/pools.go | 111 +
 pkg/pools/pools_nopool.go | 73 +
 pkg/promise/promise.go | 11 +
 pkg/proxy/MAINTAINERS | 1 +
 pkg/proxy/network_proxy_test.go | 216 +
 pkg/proxy/proxy.go | 29 +
 pkg/proxy/stub_proxy.go | 22 +
 pkg/proxy/tcp_proxy.go | 89 +
 pkg/proxy/udp_proxy.go | 157 +
 pkg/reexec/MAINTAINERS | 1 +
 pkg/reexec/README.md | 5 +
 pkg/reexec/command_linux.go | 18 +
 pkg/reexec/command_unsupported.go | 11 +
 pkg/reexec/reexec.go | 42 +
 pkg/signal/signal.go | 19 +
 pkg/signal/signal_darwin.go | 40 +
 pkg/signal/signal_freebsd.go | 42 +
 pkg/signal/signal_linux.go | 43 +
 pkg/signal/signal_unsupported.go | 9 +
 pkg/signal/trap.go | 54 +
 pkg/stdcopy/MAINTAINERS | 1 +
 pkg/stdcopy/stdcopy.go | 172 +
 pkg/stdcopy/stdcopy_test.go | 20 +
 pkg/symlink/LICENSE.APACHE | 191 +
 pkg/symlink/LICENSE.BSD | 27 +
 pkg/symlink/MAINTAINERS | 3 +
 pkg/symlink/README.md | 5 +
 pkg/symlink/fs.go | 131 +
 pkg/symlink/fs_test.go | 402 ++
 pkg/sysinfo/MAINTAINERS | 2 +
 pkg/sysinfo/sysinfo.go | 47 +
 pkg/system/MAINTAINERS | 2 +
 pkg/system/errors.go | 9 +
 pkg/system/stat_linux.go | 13 +
 pkg/system/stat_unsupported.go | 13 +
 pkg/system/utimes_darwin.go | 11 +
 pkg/system/utimes_freebsd.go | 24 +
 pkg/system/utimes_linux.go | 28 +
 pkg/system/utimes_test.go | 64 +
 pkg/system/utimes_unsupported.go | 13 +
 pkg/system/xattrs_linux.go | 59 +
 pkg/system/xattrs_unsupported.go | 11 +
 pkg/systemd/MAINTAINERS | 1 +
 pkg/systemd/booted.go | 15 +
 pkg/systemd/listendfd.go | 40 +
 pkg/systemd/sd_notify.go | 33 +
 pkg/tailfile/tailfile.go | 61 +
 pkg/tailfile/tailfile_test.go | 148 +
 pkg/tarsum/MAINTAINER | 1 +
 pkg/tarsum/fileinfosums.go | 125 +
 pkg/tarsum/fileinfosums_test.go | 45 +
 pkg/tarsum/tarsum.go | 285 ++
 pkg/tarsum/tarsum_test.go | 408 ++
 .../json | 1 +
 .../layer.tar | Bin 0 -> 9216 bytes
 .../json | 1 +
 .../layer.tar | Bin 0 -> 1536 bytes
 pkg/tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/xattr/json | 1 +
 pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes
 pkg/tarsum/versioning.go | 56 +
 pkg/tarsum/versioning_test.go | 49 +
 pkg/tarsum/writercloser.go | 22 +
 pkg/term/MAINTAINERS | 1 +
 pkg/term/term.go | 103 +
 pkg/term/termios_darwin.go | 65 +
 pkg/term/termios_freebsd.go | 65 +
 pkg/term/termios_linux.go | 44 +
 pkg/testutils/MAINTAINERS | 2 +
 pkg/testutils/README.md | 2 +
 pkg/testutils/utils.go | 37 +
 pkg/timeutils/MAINTAINERS | 1 +
 pkg/timeutils/json.go | 23 +
 pkg/truncindex/MAINTAINERS | 1 +
 pkg/truncindex/truncindex.go | 106 +
 pkg/truncindex/truncindex_test.go | 401 ++
 pkg/units/MAINTAINERS | 2 +
 pkg/units/duration.go | 31 +
 pkg/units/duration_test.go | 46 +
 pkg/units/size.go | 81 +
 pkg/units/size_test.go | 98 +
 pkg/version/version.go | 57 +
 pkg/version/version_test.go | 27 +
 registry/MAINTAINERS | 5 +
 registry/auth.go | 310 ++
 registry/auth_test.go | 149 +
 registry/endpoint.go | 213 +
 registry/endpoint_test.go | 27 +
 registry/httpfactory.go | 46 +
 registry/registry.go | 253 ++
 registry/registry_mock_test.go | 394 ++
 registry/registry_test.go | 355 ++
 registry/service.go | 118 +
 registry/session.go | 617 +++
 registry/session_v2.go | 390 ++
 registry/types.go | 67 +
 runconfig/compare.go | 60 +
 runconfig/config.go | 71 +
 runconfig/config_test.go | 264 ++
 runconfig/exec.go | 75 +
 runconfig/hostconfig.go | 121 +
 runconfig/merge.go | 107 +
 runconfig/parse.go | 406 ++
 runconfig/parse_test.go | 60 +
 trust/service.go | 74 +
 trust/trusts.go | 199 +
 utils/daemon.go | 36 +
 utils/http.go | 164 +
 utils/jsonmessage.go | 169 +
 utils/jsonmessage_test.go | 38 +
 utils/progressreader.go | 55 +
 utils/random.go | 16 +
 utils/streamformatter.go | 112 +
 utils/streamformatter_test.go | 67 +
 utils/timeoutconn.go | 26 +
 utils/timeoutconn_test.go | 33 +
 utils/tmpdir.go | 12 +
 utils/tmpdir_unix.go | 18 +
 utils/utils.go | 542 +++
 utils/utils_test.go | 99 +
 volumes/MAINTAINERS | 1 +
 volumes/repository.go | 217 +
 volumes/volume.go | 139 +
 670 files changed, 80391 insertions(+)
 create mode 100644 .dockerignore
 create mode 100755 .drone.yml
 create mode 100644 .gitignore
 create mode 100644 .mailmap
 create mode 100644 .travis.yml
 create mode 100644 AUTHORS
 create mode 100644 CHANGELOG.md
 create mode 100644 CONTRIBUTING.md
 create mode 100644 Dockerfile
 create mode 100644 LICENSE
 create mode 100644 MAINTAINERS
 create mode 100644 Makefile
 create mode 100644 NOTICE
 create mode 100644 README.md
 create mode 100644 VERSION
 create mode 100644 api/MAINTAINERS
 create mode 100644 api/README.md
 create mode 100644 api/api_unit_test.go
 create mode 100644 api/client/cli.go
 create mode 100644 api/client/commands.go
 create mode 100644 api/client/hijack.go
 create mode 100644 api/client/utils.go
 create mode 100644 api/common.go
 create mode 100644 api/server/MAINTAINERS
 create mode 100644 api/server/server.go
 create mode 100644 api/server/server_unit_test.go
 create mode 100644 builder/MAINTAINERS
 create mode 100644 builder/dispatchers.go
 create mode 100644 builder/evaluator.go
 create mode 100644 builder/internals.go
 create mode 100644 builder/job.go
 create mode 100644 builder/parser/dumper/main.go
 create mode 100644 builder/parser/line_parsers.go
 create mode 100644 builder/parser/parser.go
 create mode 100644 builder/parser/parser_test.go
 create mode 100644 builder/parser/testfiles-negative/env_equals_env/Dockerfile
 create mode 100644 builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile
 create mode 100644 builder/parser/testfiles-negative/shykes-nested-json/Dockerfile
 create mode 100644 builder/parser/testfiles/brimstone-consuldock/Dockerfile
 create mode 100644 builder/parser/testfiles/brimstone-consuldock/result
 create mode 100644 builder/parser/testfiles/brimstone-docker-consul/Dockerfile
 create mode 100644 builder/parser/testfiles/brimstone-docker-consul/result
 create mode 100644 builder/parser/testfiles/continueIndent/Dockerfile
 create mode 100644 builder/parser/testfiles/continueIndent/result
 create mode 100644 builder/parser/testfiles/cpuguy83-nagios/Dockerfile
 create mode 100644 builder/parser/testfiles/cpuguy83-nagios/result
 create mode 100644 builder/parser/testfiles/docker/Dockerfile
 create mode 100644 builder/parser/testfiles/docker/result
 create mode 100644 builder/parser/testfiles/escapes/Dockerfile
 create mode 100644 builder/parser/testfiles/escapes/result
 create mode 100644 builder/parser/testfiles/influxdb/Dockerfile
 create mode 100644 builder/parser/testfiles/influxdb/result
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-single-quotes/result
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile
 create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result
 create mode 100644 builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile
 create mode 100644 builder/parser/testfiles/kartar-entrypoint-oddities/result
 create mode 100644 builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile
 create mode 100644 builder/parser/testfiles/lk4d4-the-edge-case-generator/result
 create mode 100644 builder/parser/testfiles/mail/Dockerfile
 create mode 100644 builder/parser/testfiles/mail/result
 create mode 100644 builder/parser/testfiles/multiple-volumes/Dockerfile
 create mode 100644 builder/parser/testfiles/multiple-volumes/result
 create mode 100644 builder/parser/testfiles/mumble/Dockerfile
 create mode 100644 builder/parser/testfiles/mumble/result
 create mode 100644 builder/parser/testfiles/nginx/Dockerfile
 create mode 100644 builder/parser/testfiles/nginx/result
 create mode 100644 builder/parser/testfiles/tf2/Dockerfile
 create mode 100644 builder/parser/testfiles/tf2/result
 create mode 100644 builder/parser/testfiles/weechat/Dockerfile
 create mode 100644 builder/parser/testfiles/weechat/result
 create mode 100644 builder/parser/testfiles/znc/Dockerfile
 create mode 100644 builder/parser/testfiles/znc/result
 create mode 100644 builder/parser/utils.go
 create mode 100644 builder/support.go
 create mode 100644 builtins/builtins.go
 create mode 100644 contrib/MAINTAINERS
 create mode 100644 contrib/README
 create mode 100755 contrib/check-config.sh
 create mode 100755 contrib/completion/bash/docker
 create mode 100644 contrib/completion/fish/docker.fish
 create mode 100644 contrib/completion/zsh/_docker
 create mode 100644 contrib/desktop-integration/README.md
 create mode 100644 contrib/desktop-integration/chromium/Dockerfile
 create mode 100644 contrib/desktop-integration/gparted/Dockerfile
 create mode 100644 contrib/docker-device-tool/device_tool.go
 create mode 100644 contrib/host-integration/Dockerfile.dev
 create mode 100644 contrib/host-integration/Dockerfile.min
 create mode 100644 contrib/host-integration/manager.go
 create mode 100755 contrib/host-integration/manager.sh
 create mode 100755 contrib/host-integration/manager/systemd
 create mode 100755 contrib/host-integration/manager/upstart
 create mode 100644 contrib/init/openrc/docker.confd
 create mode 100755 contrib/init/openrc/docker.initd
 create mode 100644 contrib/init/systemd/MAINTAINERS
 create mode 100644 contrib/init/systemd/docker.service
 create mode 100644 contrib/init/systemd/docker.socket
 create mode 100755 contrib/init/sysvinit-debian/docker
 create mode 100644 contrib/init/sysvinit-debian/docker.default
 create mode 100755 contrib/init/sysvinit-redhat/docker
 create mode 100644 contrib/init/sysvinit-redhat/docker.sysconfig
 create mode 100644 contrib/init/upstart/docker.conf
 create mode 100755 contrib/mkimage-alpine.sh
 create mode 100644 contrib/mkimage-arch-pacman.conf
 create mode 100755 contrib/mkimage-arch.sh
 create mode 100755 contrib/mkimage-busybox.sh
 create mode 100755 contrib/mkimage-crux.sh
 create mode 100755 contrib/mkimage-debootstrap.sh
 create mode 100755 contrib/mkimage-rinse.sh
 create mode 100755 contrib/mkimage-unittest.sh
 create mode 100755 contrib/mkimage-yum.sh
 create mode 100755 contrib/mkimage.sh
 create mode 100755 contrib/mkimage/.febootstrap-minimize
 create mode 100755 contrib/mkimage/busybox-static
 create mode 100755 contrib/mkimage/debootstrap
 create mode 100755 contrib/mkimage/mageia-urpmi
 create mode 100755 contrib/mkimage/rinse
 create mode 100755 contrib/mkseccomp.pl
 create mode 100644 contrib/mkseccomp.sample
 create mode 100755 contrib/nuke-graph-directory.sh
 create mode 100644 contrib/syntax/kate/Dockerfile.xml
 create mode 100644 contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
 create mode 100644 contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
 create mode 100644 contrib/syntax/textmate/Docker.tmbundle/info.plist
 create mode 100644 contrib/syntax/textmate/MAINTAINERS
 create mode 100644 contrib/syntax/textmate/README.md
 create mode 100644 contrib/syntax/vim/LICENSE
 create mode 100644 contrib/syntax/vim/README.md
 create mode 100644 contrib/syntax/vim/doc/dockerfile.txt
 create mode 100644 contrib/syntax/vim/ftdetect/dockerfile.vim
 create mode 100644 contrib/syntax/vim/syntax/dockerfile.vim
 create mode 100644 contrib/udev/80-docker.rules
 create mode 100644 contrib/vagrant-docker/README.md
 create mode 100644 daemon/MAINTAINERS
 create mode 100644 daemon/README.md
 create mode 100644 daemon/attach.go
 create mode 100644 daemon/changes.go
 create mode 100644 daemon/commit.go
 create mode 100644 daemon/config.go
 create mode 100644 daemon/container.go
 create mode 100644 daemon/container_unit_test.go
 create mode 100644 daemon/copy.go
 create mode 100644 daemon/create.go
 create mode 100644 daemon/daemon.go
 create mode 100644 daemon/daemon_aufs.go
 create mode 100644 daemon/daemon_btrfs.go
 create mode 100644 daemon/daemon_devicemapper.go
 create mode 100644 daemon/daemon_no_aufs.go
 create mode 100644 daemon/daemon_unit_test.go
 create mode 100644 daemon/delete.go
 create mode 100644 daemon/exec.go
 create mode 100644 daemon/execdriver/MAINTAINERS
 create mode 100644 daemon/execdriver/driver.go
 create mode 100644 daemon/execdriver/execdrivers/execdrivers.go
 create mode 100644 daemon/execdriver/lxc/MAINTAINERS
 create mode 100644 daemon/execdriver/lxc/driver.go
 create mode 100644 daemon/execdriver/lxc/info.go
 create mode 100644 daemon/execdriver/lxc/info_test.go
 create mode 100644 daemon/execdriver/lxc/init.go
 create mode 100644 daemon/execdriver/lxc/lxc_init_linux.go
 create mode 100644 daemon/execdriver/lxc/lxc_init_unsupported.go
 create mode 100644 daemon/execdriver/lxc/lxc_template.go
 create mode 100644 daemon/execdriver/lxc/lxc_template_unit_test.go
 create mode 100644 daemon/execdriver/native/create.go
 create mode 100644 daemon/execdriver/native/driver.go
 create mode 100644 daemon/execdriver/native/driver_unsupported.go
 create mode 100644 daemon/execdriver/native/driver_unsupported_nocgo.go
 create mode 100644 daemon/execdriver/native/exec.go
 create mode 100644 daemon/execdriver/native/info.go
 create mode 100644 daemon/execdriver/native/init.go
 create mode 100644 daemon/execdriver/native/template/default_template.go
 create mode 100644 daemon/execdriver/native/utils.go
 create mode 100644 daemon/execdriver/pipes.go
 create mode 100644 daemon/execdriver/termconsole.go
 create mode 100644 daemon/execdriver/utils.go
 create mode 100644 daemon/export.go
 create mode 100644 daemon/graphdriver/aufs/aufs.go
 create mode 100644 daemon/graphdriver/aufs/aufs_test.go
 create mode 100644 daemon/graphdriver/aufs/dirs.go
 create mode 100644 daemon/graphdriver/aufs/migrate.go
 create mode 100644 daemon/graphdriver/aufs/mount.go
 create mode 100644 daemon/graphdriver/aufs/mount_linux.go
 create mode 100644 daemon/graphdriver/aufs/mount_unsupported.go
 create mode 100644 daemon/graphdriver/btrfs/MAINTAINERS
 create mode 100644 daemon/graphdriver/btrfs/btrfs.go
 create mode 100644 daemon/graphdriver/btrfs/btrfs_test.go
 create mode 100644 daemon/graphdriver/btrfs/dummy_unsupported.go
 create mode 100644 daemon/graphdriver/devmapper/MAINTAINERS
 create mode 100644 daemon/graphdriver/devmapper/README.md
 create mode 100644 daemon/graphdriver/devmapper/attach_loopback.go
 create mode 100644 daemon/graphdriver/devmapper/deviceset.go
 create mode 100644 daemon/graphdriver/devmapper/devmapper.go
 create mode 100644 daemon/graphdriver/devmapper/devmapper_doc.go
 create mode 100644 daemon/graphdriver/devmapper/devmapper_log.go
 create mode 100644 daemon/graphdriver/devmapper/devmapper_test.go
 create mode 100644 daemon/graphdriver/devmapper/devmapper_wrapper.go
 create mode 100644 daemon/graphdriver/devmapper/driver.go
 create mode 100644 daemon/graphdriver/devmapper/ioctl.go
 create mode 100644 daemon/graphdriver/devmapper/mount.go
 create mode 100644 daemon/graphdriver/driver.go
 create mode 100644 daemon/graphdriver/fsdiff.go
 create mode 100644 daemon/graphdriver/graphtest/graphtest.go
 create mode 100644 daemon/graphdriver/vfs/driver.go
 create mode 100644 daemon/graphdriver/vfs/vfs_test.go
 create mode 100644 daemon/history.go
 create mode 100644 daemon/image_delete.go
 create mode 100644 daemon/info.go
 create mode 100644 daemon/inspect.go
 create mode 100644 daemon/kill.go
 create mode 100644 daemon/list.go
 create mode 100644 daemon/logs.go
 create mode 100644 daemon/monitor.go
 create mode 100644 daemon/network_settings.go
 create mode 100644 daemon/networkdriver/bridge/driver.go
 create mode 100644 daemon/networkdriver/bridge/driver_test.go
 create mode 100644 daemon/networkdriver/ipallocator/allocator.go
 create mode 100644 daemon/networkdriver/ipallocator/allocator_test.go
 create mode 100644 daemon/networkdriver/network.go
 create mode 100644 daemon/networkdriver/network_test.go
 create mode 100644 daemon/networkdriver/portallocator/portallocator.go
 create mode 100644 daemon/networkdriver/portallocator/portallocator_test.go
 create mode 100644 daemon/networkdriver/portmapper/mapper.go
 create mode 100644 daemon/networkdriver/portmapper/mapper_test.go
 create mode 100644 daemon/networkdriver/portmapper/mock_proxy.go
 create mode 100644 daemon/networkdriver/portmapper/proxy.go
 create mode 100644 daemon/networkdriver/utils.go
 create mode 100644 daemon/pause.go
 create mode 100644 daemon/resize.go
 create mode 100644 daemon/restart.go
 create mode 100644 daemon/start.go
 create mode 100644 daemon/state.go
 create mode 100644 daemon/state_test.go
 create mode 100644 daemon/stop.go
 create mode 100644 daemon/top.go
 create mode 100644 daemon/utils.go
 create mode 100644 daemon/utils_linux.go
 create mode 100644 daemon/utils_nolinux.go
 create mode 100644 daemon/utils_test.go
 create mode 100644 daemon/volumes.go
 create mode 100644 daemon/wait.go
 create mode 100644 docker/README.md
 create mode 100644 docker/client.go
 create mode 100644 docker/daemon.go
 create mode 100644 docker/docker.go
 create mode 100644 docker/flags.go
 create mode 100644 dockerinit/dockerinit.go
 create mode 100644 dockerversion/dockerversion.go
 create mode 100644 docs/.gitignore
 create mode 100644 docs/Dockerfile
 create mode 100644 docs/MAINTAINERS
 create mode 100755 docs/README.md
 create mode 100755 docs/docs-update.py
 create mode 100644 docs/man/Dockerfile
 create mode 100644 docs/man/Dockerfile.5.md
 create mode 100644 docs/man/README.md
 create mode 100644 docs/man/docker-attach.1.md
 create mode 100644 docs/man/docker-build.1.md
 create mode 100644 docs/man/docker-commit.1.md
 create mode 100644 docs/man/docker-cp.1.md
 create mode 100644 docs/man/docker-create.1.md
 create mode 100644 docs/man/docker-diff.1.md
 create mode 100644 docs/man/docker-events.1.md
 create mode 100644 docs/man/docker-exec.1.md
 create mode 100644 docs/man/docker-export.1.md
 create mode 100644 docs/man/docker-history.1.md
 create mode 100644 docs/man/docker-images.1.md
 create mode 100644 docs/man/docker-import.1.md
 create mode 100644 docs/man/docker-info.1.md
 create mode 100644 docs/man/docker-inspect.1.md
 create mode 100644 docs/man/docker-kill.1.md
 create mode 100644 docs/man/docker-load.1.md
 create mode 100644 docs/man/docker-login.1.md
 create mode 100644 docs/man/docker-logout.1.md
 create mode 100644 docs/man/docker-logs.1.md
 create mode 100644 docs/man/docker-pause.1.md
 create mode 100644 docs/man/docker-port.1.md
 create mode 100644 docs/man/docker-ps.1.md
 create mode 100644 docs/man/docker-pull.1.md
 create mode 100644 docs/man/docker-push.1.md
 create mode 100644 docs/man/docker-restart.1.md
 create mode 100644 docs/man/docker-rm.1.md
 create mode 100644 docs/man/docker-rmi.1.md
 create mode 100644 docs/man/docker-run.1.md
 create mode 100644 docs/man/docker-save.1.md
 create mode 100644 docs/man/docker-search.1.md
 create mode 100644 docs/man/docker-start.1.md
 create mode 100644 docs/man/docker-stop.1.md
 create mode 100644 docs/man/docker-tag.1.md
 create mode 100644 docs/man/docker-top.1.md
 create mode 100644 docs/man/docker-unpause.1.md
 create mode 100644 docs/man/docker-version.1.md
 create mode 100644 docs/man/docker-wait.1.md
 create mode 100644 docs/man/docker.1.md
 create mode 100755 docs/man/md2man-all.sh
 create mode 100644 docs/mkdocs.yml
 create mode 100755 docs/release.sh
 create mode 100644 docs/s3_website.json
 create mode 100644 engine/MAINTAINERS
 create mode 100644 engine/engine.go
 create mode 100644 engine/engine_test.go
 create mode 100644 engine/env.go
 create mode 100644 engine/env_test.go
 create mode 100644 engine/hack.go
 create mode 100644 engine/helpers_test.go
 create mode 100644 engine/http.go
 create mode 100644 engine/job.go
 create mode 100644 engine/job_test.go
 create mode 100644 engine/shutdown_test.go
 create mode 100644 engine/streams.go
 create mode 100644 engine/streams_test.go
 create mode 100644 engine/table.go
 create mode 100644 engine/table_test.go
 create mode 100644 events/events.go
 create mode 100644 events/events_test.go
 create mode 100644 graph/MAINTAINERS
 create mode 100644 graph/export.go
 create mode 100644 graph/graph.go
 create mode 100644 graph/history.go
 create mode 100644 graph/import.go
 create mode 100644 graph/list.go
 create mode 100644 graph/load.go
 create mode 100644 graph/pools_test.go
 create mode 100644 graph/pull.go
 create mode 100644 graph/push.go
 create mode 100644 graph/service.go
 create mode 100644 graph/tag.go
 create mode 100644 graph/tags.go
 create mode 100644 graph/tags_unit_test.go
 create mode 100644 graph/viz.go
 create mode 120000 hack/CONTRIBUTORS.md
 create mode 100644 hack/MAINTAINERS
 create mode 100644 hack/MAINTAINERS.md
 create mode 100644 hack/PACKAGERS.md
 create mode 100644 hack/PRINCIPLES.md
 create mode 100644 hack/README.md
 create mode 100644 hack/RELEASE-CHECKLIST.md
 create mode 100644 hack/ROADMAP.md
 create mode 100755 hack/allmaintainers.sh
 create mode 100755 hack/dind
 create mode 100755 hack/generate-authors.sh
 create mode 100755 hack/getmaintainer.sh
 create mode 100755 hack/install.sh
 create mode 100755 hack/make.sh
 create mode 100644 hack/make/.ensure-busybox
 create mode 100644 hack/make/.ensure-scratch
 create mode 100755 hack/make/.go-compile-test-dir
 create mode 100644 hack/make/.validate
 create mode 100644 hack/make/README.md
 create mode 100755 hack/make/binary
 create mode 100644 hack/make/cover
 create mode 100644 hack/make/cross
 create mode 100644 hack/make/dynbinary
 create mode 100644 hack/make/dyntest-integration
 create mode 100644 hack/make/dyntest-unit
 create mode 100644 hack/make/test-integration
 create mode 100644 hack/make/test-integration-cli
 create mode 100644 hack/make/test-unit
 create mode 100644 hack/make/tgz
 create mode 100644 hack/make/ubuntu
 create mode 100644 hack/make/validate-dco
 create mode 100644 hack/make/validate-gofmt
 create mode 100755 hack/release.sh
 create mode 100755 hack/stats.sh
 create mode 100755 hack/vendor.sh
 create mode 100644 image/graph.go
 create mode 100644 image/image.go
 create mode 100644 integration-cli/MAINTAINERS
 create mode 100644 integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file
 create mode 100644 integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file
 create mode 100644 integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file1
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file2
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file3
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file4
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2
 create mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3
 create mode 100644 integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file
 create mode 100644 integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file
 create mode 100644 integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile
 create mode 100644 integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile
 create mode 100644 integration-cli/docker_api_inspect_test.go
 create mode 100644 integration-cli/docker_cli_attach_test.go
 create mode 100644 integration-cli/docker_cli_build_test.go
 create mode 100644 integration-cli/docker_cli_commit_test.go
 create mode 100644 integration-cli/docker_cli_cp_test.go
 create mode 100644 integration-cli/docker_cli_create_test.go
 create mode 100644 integration-cli/docker_cli_daemon_test.go
 create mode 100644 integration-cli/docker_cli_diff_test.go
 create mode 100644 integration-cli/docker_cli_events_test.go
 create mode 100644 integration-cli/docker_cli_exec_test.go
 create mode 100644 integration-cli/docker_cli_export_import_test.go
 create mode 100644 integration-cli/docker_cli_history_test.go
 create mode 100644 integration-cli/docker_cli_images_test.go
 create mode 100644 integration-cli/docker_cli_import_test.go
 create mode 100644 integration-cli/docker_cli_info_test.go
 create mode 100644 integration-cli/docker_cli_inspect_test.go
 create mode 100644 integration-cli/docker_cli_kill_test.go
 create mode 100644 integration-cli/docker_cli_links_test.go
 create mode 100644 integration-cli/docker_cli_logs_test.go
 create mode 100644 integration-cli/docker_cli_nat_test.go
 create mode 100644 integration-cli/docker_cli_port_test.go
 create mode 100644 integration-cli/docker_cli_ps_test.go
 create mode 100644 integration-cli/docker_cli_pull_test.go
 create mode 100644 integration-cli/docker_cli_push_test.go
 create mode 100644 integration-cli/docker_cli_restart_test.go
 create mode 100644 integration-cli/docker_cli_rm_test.go
 create mode 100644 integration-cli/docker_cli_rmi_test.go
 create mode 100644 integration-cli/docker_cli_run_test.go
 create mode 100644 integration-cli/docker_cli_save_load_test.go
 create mode 100644 integration-cli/docker_cli_search_test.go
 create mode 100644 integration-cli/docker_cli_start_test.go
 create mode 100644 integration-cli/docker_cli_tag_test.go
 create mode 100644 integration-cli/docker_cli_top_test.go
 create mode 100644 integration-cli/docker_cli_version_test.go
 create mode 100644 integration-cli/docker_test_vars.go
 create mode 100644 integration-cli/docker_utils.go
 create mode 100644 integration-cli/utils.go
 create mode 100644 integration/MAINTAINERS
 create mode 100644 integration/README.md
 create mode 100644 integration/api_test.go
 create mode 100644 integration/commands_test.go
 create mode 100644 integration/container_test.go
 create mode 100644 integration/fixtures/https/ca.pem
 create mode 100644 integration/fixtures/https/client-cert.pem
 create mode 100644 integration/fixtures/https/client-key.pem
 create mode 100644 integration/fixtures/https/client-rogue-cert.pem
 create mode 100644 integration/fixtures/https/client-rogue-key.pem
 create mode 100644 integration/fixtures/https/server-cert.pem
 create mode 100644 integration/fixtures/https/server-key.pem
 create mode 100644 integration/fixtures/https/server-rogue-cert.pem
 create mode 100644 integration/fixtures/https/server-rogue-key.pem
 create mode 100644 integration/graph_test.go
 create mode 100644 integration/https_test.go
 create mode 100644 integration/runtime_test.go
 create mode 100644 integration/server_test.go
 create mode 100644 integration/utils_test.go
 create mode 100644 integration/z_final_test.go
 create mode 100644 links/links.go
 create mode 100644 links/links_test.go
 create mode 100644 nat/nat.go
 create mode 100644 nat/nat_test.go
 create mode 100644 nat/sort.go
 create mode 100644 nat/sort_test.go
 create mode 100644 opts/envfile.go
 create mode 100644 opts/ip.go
 create mode 100644 opts/opts.go
 create mode 100644 opts/opts_test.go
 create mode 100644 pkg/README.md
 create mode 100644 pkg/archive/MAINTAINERS
 create mode 100644 pkg/archive/README.md
 create mode 100644 pkg/archive/archive.go
 create mode 100644 pkg/archive/archive_test.go
 create mode 100644 pkg/archive/changes.go
 create mode 100644 pkg/archive/changes_test.go
 create mode 100644 pkg/archive/diff.go
 create mode 100644 pkg/archive/diff_test.go
 create mode 100644 pkg/archive/testdata/broken.tar
 create mode 100644 pkg/archive/time_linux.go
 create mode 100644 pkg/archive/time_unsupported.go
 create mode 100644 pkg/archive/utils_test.go
 create mode 100644 pkg/archive/wrap.go
 create mode 100644 pkg/broadcastwriter/broadcastwriter.go
 create mode 100644 pkg/broadcastwriter/broadcastwriter_test.go
 create mode 100644 pkg/chrootarchive/archive.go
 create mode 100644 pkg/chrootarchive/archive_test.go
 create mode 100644 pkg/chrootarchive/diff.go
 create mode 100644 pkg/chrootarchive/init.go
 create mode 100644 pkg/fileutils/fileutils.go
 create mode 100644 pkg/graphdb/MAINTAINERS
 create mode 100644 pkg/graphdb/conn_sqlite3.go
 create mode 100644 pkg/graphdb/conn_unsupported.go
 create mode 100644 pkg/graphdb/graphdb.go
 create mode 100644 pkg/graphdb/graphdb_test.go
 create mode 100644 pkg/graphdb/sort.go
 create mode 100644 pkg/graphdb/sort_test.go
 create mode 100644 pkg/graphdb/utils.go
 create mode 100644 pkg/httputils/MAINTAINERS
 create mode 100644 pkg/httputils/resumablerequestreader.go
 create mode 100644 pkg/ioutils/readers.go
 create mode 100644 pkg/ioutils/readers_test.go
 create mode 100644 pkg/ioutils/writers.go
 create mode 100644 pkg/iptables/MAINTAINERS
 create mode 100644 pkg/iptables/iptables.go
 create mode 100644 pkg/jsonlog/jsonlog.go
 create mode 100644 pkg/jsonlog/jsonlog_marshalling.go
 create mode 100644 pkg/jsonlog/jsonlog_test.go
 create mode 100644 pkg/listenbuffer/buffer.go
 create mode 100644 pkg/log/log.go
 create mode 100644 pkg/log/log_test.go
 create mode 100644 pkg/mflag/LICENSE
 create mode 100644 pkg/mflag/MAINTAINERS
 create mode 100644 pkg/mflag/README.md
 create mode 100644 pkg/mflag/example/example.go
 create mode 100644 pkg/mflag/flag.go
 create mode 100644 pkg/mflag/flag_test.go
 create mode 100644 pkg/mount/MAINTAINERS
 create mode 100644 pkg/mount/flags.go
 create mode 100644 pkg/mount/flags_freebsd.go
 create mode 100644 pkg/mount/flags_linux.go
 create mode 100644 pkg/mount/flags_unsupported.go
 create mode 100644 pkg/mount/mount.go
 create mode 100644 pkg/mount/mount_test.go
 create mode 100644 pkg/mount/mounter_freebsd.go
 create mode 100644 pkg/mount/mounter_linux.go
 create mode 100644 pkg/mount/mounter_unsupported.go
 create mode 100644 pkg/mount/mountinfo.go
 create mode 100644 pkg/mount/mountinfo_freebsd.go
 create mode 100644 pkg/mount/mountinfo_linux.go
 create mode 100644 pkg/mount/mountinfo_linux_test.go
 create mode 100644 pkg/mount/mountinfo_unsupported.go
 create mode 100644 pkg/namesgenerator/names-generator.go
 create mode 100644 pkg/namesgenerator/names-generator_test.go
 create mode 100644 pkg/networkfs/MAINTAINERS
 create mode 100644 pkg/networkfs/etchosts/etchosts.go
 create mode 100644 pkg/networkfs/etchosts/etchosts_test.go
 create mode 100644 pkg/networkfs/resolvconf/resolvconf.go
 create mode 100644 pkg/networkfs/resolvconf/resolvconf_test.go
 create mode 100644 pkg/parsers/MAINTAINERS
 create mode 100644 pkg/parsers/filters/parse.go
 create mode 100644 pkg/parsers/filters/parse_test.go
 create mode 100644 pkg/parsers/kernel/kernel.go
 create mode 100644 pkg/parsers/kernel/kernel_test.go
 create mode 100644 pkg/parsers/kernel/uname_linux.go
 create mode 100644 pkg/parsers/kernel/uname_unsupported.go
 create mode 100644 pkg/parsers/operatingsystem/operatingsystem.go
 create mode 100644 pkg/parsers/operatingsystem/operatingsystem_test.go
 create mode 100644 pkg/parsers/parsers.go
 create mode 100644 pkg/parsers/parsers_test.go
 create mode 100644 pkg/pools/pools.go
 create mode 100644 pkg/pools/pools_nopool.go
 create mode 100644 pkg/promise/promise.go
 create mode 100644 pkg/proxy/MAINTAINERS
 create mode 100644 pkg/proxy/network_proxy_test.go
 create mode 100644 pkg/proxy/proxy.go
 create mode 100644 pkg/proxy/stub_proxy.go
 create mode 100644 pkg/proxy/tcp_proxy.go
 create mode 100644 pkg/proxy/udp_proxy.go
 create mode 100644 pkg/reexec/MAINTAINERS
 create mode 100644 pkg/reexec/README.md
 create mode 100644 pkg/reexec/command_linux.go
 create mode 100644 pkg/reexec/command_unsupported.go
 create mode 100644 pkg/reexec/reexec.go
 create mode 100644 pkg/signal/signal.go
 create mode 100644 pkg/signal/signal_darwin.go
 create mode 100644 pkg/signal/signal_freebsd.go
 create mode 100644 pkg/signal/signal_linux.go
 create mode 100644 pkg/signal/signal_unsupported.go
 create mode 100644 pkg/signal/trap.go
 create mode 100644 pkg/stdcopy/MAINTAINERS
 create mode 100644 pkg/stdcopy/stdcopy.go
 create mode 100644 pkg/stdcopy/stdcopy_test.go
 create mode 100644 pkg/symlink/LICENSE.APACHE
 create mode 100644 pkg/symlink/LICENSE.BSD
 create mode 100644 pkg/symlink/MAINTAINERS
 create mode 100644 pkg/symlink/README.md
 create mode 100644 pkg/symlink/fs.go
 create mode 100644 pkg/symlink/fs_test.go
 create mode 100644 pkg/sysinfo/MAINTAINERS
 create mode 100644 pkg/sysinfo/sysinfo.go
 create mode 100644 pkg/system/MAINTAINERS
 create mode 100644 pkg/system/errors.go
 create mode 100644 pkg/system/stat_linux.go
 create mode 100644 pkg/system/stat_unsupported.go
 create mode 100644 pkg/system/utimes_darwin.go
 create mode 100644 pkg/system/utimes_freebsd.go
 create mode 100644 pkg/system/utimes_linux.go
 create mode 100644 pkg/system/utimes_test.go
 create mode 100644 pkg/system/utimes_unsupported.go
 create mode 100644 pkg/system/xattrs_linux.go
 create mode 100644 pkg/system/xattrs_unsupported.go
 create mode 100644 pkg/systemd/MAINTAINERS
 create mode 100644 pkg/systemd/booted.go
 create mode 100644 pkg/systemd/listendfd.go
 create mode 100644 pkg/systemd/sd_notify.go
 create mode 100644 pkg/tailfile/tailfile.go
 create mode 100644 pkg/tailfile/tailfile_test.go
 create mode 100644 pkg/tarsum/MAINTAINER
 create mode 100644 pkg/tarsum/fileinfosums.go
 create mode 100644 pkg/tarsum/fileinfosums_test.go
 create mode 100644 pkg/tarsum/tarsum.go
 create mode 100644 pkg/tarsum/tarsum_test.go
 create mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
 create mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
 create mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
 create mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
 create mode 100644 pkg/tarsum/testdata/collision/collision-0.tar
 create mode 100644 pkg/tarsum/testdata/collision/collision-1.tar
 create mode 100644 pkg/tarsum/testdata/collision/collision-2.tar
 create mode 100644 pkg/tarsum/testdata/collision/collision-3.tar
 create mode 100644 pkg/tarsum/testdata/xattr/json
 create mode 100644 pkg/tarsum/testdata/xattr/layer.tar
 create mode 100644 pkg/tarsum/versioning.go
 create mode 100644 pkg/tarsum/versioning_test.go
 create mode 100644 pkg/tarsum/writercloser.go
 create mode 100644 pkg/term/MAINTAINERS
 create mode 100644 pkg/term/term.go
 create mode 100644 pkg/term/termios_darwin.go
 create mode 100644 pkg/term/termios_freebsd.go
 create mode 100644 pkg/term/termios_linux.go
 create mode 100644 pkg/testutils/MAINTAINERS
 create mode 100644 pkg/testutils/README.md
 create mode 100644 pkg/testutils/utils.go
 create mode 100644 pkg/timeutils/MAINTAINERS
 create mode 100644 pkg/timeutils/json.go
 create mode 100644 pkg/truncindex/MAINTAINERS
 create mode 100644 pkg/truncindex/truncindex.go
 create mode 100644 pkg/truncindex/truncindex_test.go
 create mode 100644 pkg/units/MAINTAINERS
 create mode 100644 pkg/units/duration.go
 create mode 100644 pkg/units/duration_test.go
 create mode 100644 pkg/units/size.go
 create mode 100644 pkg/units/size_test.go
 create mode 100644 pkg/version/version.go
 create mode 100644 pkg/version/version_test.go
 create mode 100644 registry/MAINTAINERS
 create mode 100644 registry/auth.go
 create mode 100644 registry/auth_test.go
 create mode 100644 registry/endpoint.go
 create mode 100644 registry/endpoint_test.go
 create mode 100644 registry/httpfactory.go
 create mode 100644 registry/registry.go
 create mode 100644 registry/registry_mock_test.go
 create mode 100644 registry/registry_test.go
 create mode 100644 registry/service.go
 create mode 100644 registry/session.go
 create mode 100644 registry/session_v2.go
 create mode 100644 registry/types.go
 create mode 100644 runconfig/compare.go
 create mode 100644 runconfig/config.go
 create mode 100644 runconfig/config_test.go
 create mode 100644 runconfig/exec.go
 create mode 100644 runconfig/hostconfig.go
 create mode 100644 runconfig/merge.go
 create mode 100644 runconfig/parse.go
 create mode 100644 runconfig/parse_test.go
 create mode 100644 trust/service.go
 create mode 100644 trust/trusts.go
 create mode 100644 utils/daemon.go
 create mode 100644 utils/http.go
 create mode 100644 utils/jsonmessage.go
 create mode 100644 utils/jsonmessage_test.go
 create mode 100644 utils/progressreader.go
 create mode 100644 utils/random.go
 create mode 100644 utils/streamformatter.go
 create mode 100644 utils/streamformatter_test.go
 create mode 100644 utils/timeoutconn.go
 create mode 100644 utils/timeoutconn_test.go
 create mode 100644 utils/tmpdir.go
 create mode 100644 utils/tmpdir_unix.go
 create mode 100644 utils/utils.go
 create mode 100644 utils/utils_test.go
 create mode 100644 volumes/MAINTAINERS
 create mode 100644 volumes/repository.go
 create mode 100644 volumes/volume.go

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..37abdef4
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+bundles
+.gopath
diff --git a/.drone.yml b/.drone.yml
new file mode 100755
index 00000000..53e00e0f
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,14 @@
+image: dockercore/docker
+env:
+  - AUTO_GOPATH=1
+  - DOCKER_GRAPHDRIVER=vfs
+  - DOCKER_EXECDRIVER=native
+script:
+# Setup the DockerInDocker environment.
+  - hack/dind
+# Tests relying on StartWithBusybox make Drone time out.
+  - rm integration-cli/docker_cli_daemon_test.go
+  - rm integration-cli/docker_cli_exec_test.go
+# Validate and test.
+  - hack/make.sh validate-dco validate-gofmt
+  - hack/make.sh binary cross test-unit test-integration-cli test-integration
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..2a86e41c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,29 @@
+# Docker project generated files to ignore
+# if you want to ignore files created by your editor/tools,
+# please consider a global .gitignore https://help.github.com/articles/ignoring-files
+.vagrant*
+bin
+docker/docker
+.*.swp
+a.out
+*.orig
+build_src
+.flymake*
+.idea
+.DS_Store
+docs/_build
+docs/_static
+docs/_templates
+.gopath/
+.dotcloud
+*.test
+bundles/
+.hg/
+.git/
+vendor/pkg/
+pyenv
+Vagrantfile
+docs/AWS_S3_BUCKET
+docs/GIT_BRANCH
+docs/VERSION
+docs/GITCOMMIT
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 00000000..47860de4
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,99 @@
+# Generate AUTHORS: hack/generate-authors.sh
+
+# Tip for finding duplicates (besides scanning the output of AUTHORS for name
+# duplicates that aren't also email duplicates): scan the output of:
+#   git log --format='%aE - %aN' | sort -uf
+
+
+
+Guillaume J. Charmes
+
+
+
+
+
+Thatcher Peskens
+Thatcher Peskens
+Thatcher Peskens dhrp
+Jérôme Petazzoni jpetazzo
+Jérôme Petazzoni
+Joffrey F
+Joffrey F
+Joffrey F
+Tim Terhorst
+Andy Smith
+
+
+
+
+
+
+
+
+Walter Stanish
+
+Roberto Hashioka
+Konstantin Pelykh
+David Sissitka
+Nolan Darilek
+
+Benoit Chesneau
+Jordan Arentsen
+Daniel Garcia
+Miguel Angel Fernández
+Bhiraj Butala
+Faiz Khan
+Victor Lyuboslavsky
+Jean-Baptiste Barth
+Matthew Mueller
+
+Shih-Yuan Lee
+Daniel Mizyrycki root
+Jean-Baptiste Dalido
+
+
+
+
+
+
+
+
+
+
+
+
+Sven Dowideit
+Sven Dowideit
+Sven Dowideit
+Sven Dowideit <¨SvenDowideit@home.org.au¨>
+unclejack
+
+Alexandr Morozov
+
+O.S. Tezer
+
+Roberto G. Hashioka
+
+
+
+
+
+Sridhar Ratnakumar
+Sridhar Ratnakumar
+Liang-Chi Hsieh
+Aleksa Sarai
+Will Weaver
+Timothy Hobbs
+Nathan LeClaire
+Nathan LeClaire
+
+
+
+
+Matthew Heon
+
+
+
+Francisco Carriedo
+
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..174afae8
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,39 @@
+# Note: right now we don't use go-specific features of travis.
+# Later we might automate "go test" etc. (or do it inside a docker container...?)
+
+language: go
+
+go:
+# This should match the version in the Dockerfile.
+  - 1.3.1
+# Test against older versions too, just for a little extra retrocompat.
+ - 1.2 + +# Let us have pretty experimental Docker-based Travis workers. +# (These spin up much faster than the VM-based ones.) +sudo: false + +# Disable the normal go build. +install: + - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false") + - export AUTO_GOPATH=1 +# some of Docker's unit tests don't work inside Travis (yet!), so we purge those test files for now + - rm -f daemon/graphdriver/btrfs/*_test.go # fails to compile (missing header) + - rm -f daemon/graphdriver/devmapper/*_test.go # fails to compile (missing header) + - rm -f daemon/execdriver/lxc/*_test.go # fails to run (missing "lxc-start") + - rm -f daemon/graphdriver/aufs/*_test.go # fails to run ("backing file system is unsupported for this graph driver") + - rm -f daemon/graphdriver/vfs/*_test.go # fails to run (not root, which these tests assume "/var/tmp/... no owned by uid 0") + - rm -f daemon/networkdriver/bridge/*_test.go # fails to run ("Failed to initialize network driver") + - rm -f graph/*_test.go # fails to run ("mkdir /tmp/docker-test.../vfs/dir/foo/etc/postgres: permission denied") + - rm -f pkg/mount/*_test.go # fails to run ("permission denied") + +before_script: + - env | sort + +script: + - hack/make.sh validate-dco + - hack/make.sh validate-gofmt + - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary + - ./hack/make.sh dynbinary dyntest-unit + +# vim:set sw=2 ts=2: diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 00000000..43904e9e --- /dev/null +++ b/AUTHORS @@ -0,0 +1,597 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Feng +Aaron Huslage +Abel Muiño +Adam Miller +Adam Singer +Aditya +Adrian Mouat +Adrien Folie +AJ Bowen +Al Tobey +alambike +Albert Zhang +Aleksa Sarai +Alex Gaynor +Alex Warhawk +Alexander Larsson +Alexander Shopov +Alexandr Morozov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +almoehi +amangoel +AnandkumarPatel +Andre Dublin <81dublin@gmail.com> +Andrea Luzzardi +Andrea Turli +Andreas Savvides +Andreas Tiefenthaler +Andrew Duckworth +Andrew France +Andrew Macgregor +Andrew Munsell +Andrew Weiss +Andrew Williams +Andrews Medina +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Anthony Bishopric +Anton Löfgren +Anton Nikitin +Antony Messerli +apocas +Arnaud Porterie +Asbjørn Enge +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +bdevloed +Ben Firshman +Ben Sargent +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bhiraj Butala +bin liu +Bouke Haarsma +Boyd Hemphill +Brandon Liu +Brandon Philips +Brandon Rhodes +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Renié +Bryan Bess +Bryan Matsuo +Bryan Murphy +Caleb Spare +Calen Pennington +Cameron Boehmer +Carl X. Su +Charles Hooper +Charles Lindsay +Charles Merriam +Charlie Lewis +Chewey +Chia-liang Kao +Chris Alfonso +Chris Snow +Chris St. Pierre +chrismckinnel +Christian Berendt +ChristoperBiscardi +Christophe Troestler +Christopher Currie +Christopher Rigor +Ciro S. 
Costa
+Clayton Coleman
+Colin Dunklau
+Colin Rice
+Colin Walters
+Cory Forsyth
+cpuguy83
+cressie176
+Cruceru Calin-Cristian
+Daan van Berkel
+Dafydd Crosby
+Dan Buch
+Dan Hirsch
+Dan Keder
+Dan McPherson
+Dan Stine
+Dan Walsh
+Dan Williams
+Daniel Exner
+Daniel Garcia
+Daniel Gasienica
+Daniel Mizyrycki
+Daniel Norberg
+Daniel Nordberg
+Daniel Robinson
+Daniel Von Fange
+Daniel YC Lin
+Daniel, Dao Quang Minh
+Danny Berger
+Danny Yates
+Darren Coxall
+Darren Shepherd
+David Anderson
+David Calavera
+David Corking
+David Gageot
+David Mcanulty
+David Röthlisberger
+David Sissitka
+Deni Bertovic
+Derek
+Deric Crago
+Dinesh Subhraveti
+Djibril Koné
+dkumor
+Dmitry Demeshchuk
+Dolph Mathews
+Dominik Honnef
+Don Spaulding
+Doug Davis
+doug tangren
+Dr Nic Williams
+Dražen Lučanin
+Dustin Sallings
+Edmund Wagner
+Eiichi Tsukata
+Eivind Uggedal
+Elias Probst
+Emil Hernvall
+Emily Rose
+Eric Hanchrow
+Eric Lee
+Eric Myhre
+Eric Windisch
+Eric Windisch
+Erik Hollensbe
+Erik Inge Bolsø
+Erno Hopearuoho
+eugenkrizo
+Evan Hazlett
+Evan Krall
+Evan Phoenix
+Evan Wies
+evanderkoogh
+Eystein Måløy Stenberg
+ezbercih
+Fabio Falci
+Fabio Rehm
+Fabrizio Regini
+Faiz Khan
+Fareed Dudhia
+Felix Rabe
+Fernando
+Flavio Castelli
+FLGMwt
+Francisco Carriedo
+Francisco Souza
+Frank Macreery
+Fred Lifton
+Frederick F. Kautz IV
+Frederik Loeffert
+Freek Kalter
+Gabe Rosenhouse
+Gabor Nagy
+Gabriel Monroy
+Galen Sampson
+Gareth Rushgrove
+Geoffrey Bachelet
+Gereon Frey
+German DZ
+Gert van Valkenhoef
+Giuseppe Mazzotta
+Gleb Fotengauer-Malinovskiy
+Glyn Normington
+Goffert van Gool
+Graydon Hoare
+Greg Thornton
+grunny
+Guilherme Salgado
+Guillaume J. Charmes
+Gurjeet Singh
+Guruprasad
+Harald Albers
+Harley Laue
+Hector Castro
+Henning Sprang
+Hobofan
+Hollie Teal
+Hollie Teal
+hollietealok
+Hunter Blanks
+hyeongkyu.lee
+Ian Babrou
+Ian Bull
+Ian Main
+Ian Truslove
+ILYA Khlopotov
+inglesp
+Isaac Dupree
+Isabel Jimenez
+Isao Jonas
+Ivan Fraixedes
+Jack Danger Canty
+Jake Moshenko
+jakedt
+James Allen
+James Carr
+James DeFelice
+James Harrison Fisher
+James Kyle
+James Mills
+James Turnbull
+Jan Pazdziora
+Jan Toebes
+Jaroslaw Zabiello
+jaseg
+Jason Giedymin
+Jason Hall
+Jason Livesay
+Jason McVetta
+Jason Plum
+Jean-Baptiste Barth
+Jean-Baptiste Dalido
+Jeff Lindsay
+Jeff Welch
+Jeffrey Bolle
+Jeremy Grosser
+Jesse Dubay
+Jezeniel Zapanta
+Jilles Oldenbeuving
+Jim Alateras
+Jim Perrin
+Jimmy Cuadra
+Jiří Župka
+Joe Beda
+Joe Shaw
+Joe Van Dyk
+Joel Handwell
+Joffrey F
+Johan Euphrosine
+Johan Rydberg
+Johannes 'fish' Ziemke
+John Costa
+John Feminella
+John Gardiner Myers
+John OBrien III
+John Warwick
+Jon Wedaman
+Jonas Pfenniger
+Jonathan Boulle
+Jonathan Camp
+Jonathan McCrohan
+Jonathan Mueller
+Jonathan Pares
+Jonathan Rudenberg
+Joost Cassee
+Jordan Arentsen
+Jordan Sissel
+Joseph Anthony Pasquale Holsten
+Joseph Hager
+Josh
+Josh Hawn
+Josh Poimboeuf
+JP
+Julien Barbier
+Julien Bordellier
+Julien Dubois
+Justin Force
+Justin Plock
+Justin Simonelis
+Jérôme Petazzoni
+Karan Lyons
+Karl Grzeszczak
+Kato Kazuyoshi
+Kawsar Saiyeed
+Keli Hu
+Ken Cochrane
+Ken ICHIKAWA
+Kevin "qwazerty" Houdebert
+Kevin Clark
+Kevin J. Lynagh
+Kevin Menard
+Kevin Wallace
+Keyvan Fatehi
+kies
+Kim BKC Carlbacker
+kim0
+Kimbro Staken
+Kiran Gangadharan
+knappe
+Kohei Tsuruta
+Konstantin Pelykh
+Kyle Conroy
+kyu
+Lachlan Coote
+lalyos
+Lance Chen
+Lars R.
Damerow +Laurie Voss +leeplay +Len Weincier +Levi Gross +Lewis Peckover +Liang-Chi Hsieh +Lokesh Mandvekar +Louis Opter +lukaspustina +lukemarsden +Mahesh Tiyyagura +Manfred Zabarauskas +Manuel Meurer +Manuel Woelker +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marco Hennings +Marcus Farkas +Marcus Ramberg +marcuslinke +Marek Goldmann +Marius Voila +Mark Allen +Mark McGranaghan +Marko Mikulicic +Markus Fix +Martijn van Oosterhout +Martin Redmond +Mason Malone +Mateusz Sulima +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Haggard +Matthew Heon +Matthew Mueller +Matthias Klumpp +Matthias Kühnle +mattymo +mattyw +Max Shytikov +Maxim Treskin +Maxime Petazzoni +meejah +Michael Brown +Michael Crosby +Michael Gorsuch +Michael Neale +Michael Prokop +Michael Stapelberg +Michaël Pailloncy +Michiel@unhosted +Miguel Angel Fernández +Mike Chelen +Mike Gaffney +Mike MacCana +Mike Naberezny +Mike Snitzer +Mikhail Sobolev +Mohit Soni +Morgante Pell +Morten Siebuhr +Mrunal Patel +Nan Monnand Deng +Naoki Orii +Nate Jones +Nathan Kleyn +Nathan LeClaire +Nelson Chen +Niall O'Higgins +Nick Payne +Nick Stenning +Nick Stinemates +Nicolas Dudebout +Nicolas Kaiser +NikolaMandic +noducks +Nolan Darilek +O.S. Tezer +OddBloke +odk- +Oguz Bilgic +Ole Reifschneider +Olivier Gambier +pandrew +Pascal Borreli +Patrick Hemmer +pattichen +Paul +Paul Annesley +Paul Bowsher +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Morie +Paul Nasrat +Paul Weaver +Peter Bourgon +Peter Braden +Peter Waller +Phil +Phil Spitler +Phillip Alexander +Piergiuliano Bossi +Pierre-Alain RIVIERE +Piotr Bogdan +pysqz +Quentin Brossard +r0n22 +Rafal Jeczalik +Rajat Pandit +Rajdeep Dua +Ralph Bean +Ramkumar Ramachandra +Ramon van Alteren +Renato Riccieri Santos Zannon +rgstephens +Rhys Hiltner +Richard Harvey +Richo Healey +Rick Bradley +Rick van de Loo +Robert Bachmann +Robert Obryk +Roberto G. Hashioka +Robin Speekenbrink +robpc +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Roland Huß +Roland Moriz +Ron Smits +Rovanion Luckey +Rudolph Gottesheim +Ryan Anderson +Ryan Aslett +Ryan Fowler +Ryan O'Donnell +Ryan Seto +Ryan Thomas +s-ko +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Reis +Sam Rijs +Samuel Andaya +satoru +Satoshi Amemiya +Scott Bessler +Scott Collier +Sean Cronin +Sean P. 
Kane
+Sebastiaan van Stijn
+Sebastiaan van Stijn
+Senthil Kumar Selvaraj
+SeongJae Park
+Shane Canon
+shaunol
+Shawn Landden
+Shawn Siefkas
+Shih-Yuan Lee
+Silas Sewell
+Simon Taranto
+Sindhu S
+Sjoerd Langkemper
+Solomon Hykes
+Song Gao
+Soulou
+soulshake
+Sridatta Thatipamala
+Sridhar Ratnakumar
+Steeve Morin
+Stefan Praszalowicz
+Stephen Crosby
+Steven Burgess
+sudosurootdev
+Sven Dowideit
+Sylvain Bellemare
+Sébastien
+Sébastien Luttringer
+Sébastien Stormacq
+tang0th
+Tatsuki Sugiura
+Tehmasp Chaudhri
+Thatcher Peskens
+Thermionix
+Thijs Terlouw
+Thomas Bikeev
+Thomas Frössman
+Thomas Hansen
+Thomas LEVEIL
+Thomas Schroeter
+Tianon Gravi
+Tibor Vass
+Tim Bosse
+Tim Ruffles
+Tim Ruffles
+Tim Terhorst
+Timothy Hobbs
+tjmehta
+Tobias Bieniek
+Tobias Gesellchen
+Tobias Schmidt
+Tobias Schwab
+Todd Lunter
+Tom Fotherby
+Tom Hulihan
+Tom Maaswinkel
+Tommaso Visconti
+Tony Daws
+tpng
+Travis Cline
+Trent Ogren
+Tyler Brock
+Tzu-Jung Lee
+Ulysse Carion
+unclejack
+vgeta
+Victor Coisne
+Victor Lyuboslavsky
+Victor Marmol
+Victor Vieux
+Viktor Vojnovski
+Vincent Batts
+Vincent Bernat
+Vincent Mayers
+Vincent Woo
+Vinod Kulkarni
+Vishnu Kannan
+Vitor Monteiro
+Vivek Agarwal
+Vladimir Bulyga
+Vladimir Kirillov
+Vladimir Rutsky
+waitingkuo
+Walter Leibbrandt
+Walter Stanish
+WarheadsSE
+Wes Morgan
+Will Dietz
+Will Rouesnel
+Will Weaver
+William Delanoue
+William Henry
+William Riancho
+William Thurston
+wyc
+Xiuming Chen
+Yang Bai
+Yasunori Mahata
+Yurii Rashkovskii
+Zac Dover
+Zain Memon
+Zaiste!
+Zane DeGraffenried
+Zilin Du
+zimbatm
+Zoltan Tombol
+zqh
+Álvaro Lázaro
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..fdc05c10
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,1563 @@
+# Changelog
+
+## 1.3.3 (2014-12-11)
+
+#### Security
+- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
+- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
+- Validate image IDs (CVE-2014-9358)
+
+#### Runtime
+- Fix an issue when image archives are being read slowly
+
+#### Client
+- Fix a regression related to stdin redirection
+- Fix a regression with `docker cp` when destination is the current directory
+
+## 1.3.2 (2014-11-20)
+
+#### Security
+- Fix tar breakout vulnerability
+* Extractions are now sandboxed in a chroot
+- Security options are no longer committed to images
+
+#### Runtime
+- Fix deadlock in `docker ps -f exited=1`
+- Fix a bug when `--volumes-from` references a container that failed to start
+
+#### Registry
++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 (see the example below)
+* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
+- Skip the experimental registry v2 API when mirroring is enabled
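+
+A minimal sketch of the CIDR form of `--insecure-registry`; the daemon invocation is illustrative and the registry address is hypothetical:
+
+```bash
+# Trust every registry inside 10.1.0.0/16 without requiring verified HTTPS.
+docker -d --insecure-registry 10.1.0.0/16
+
+# Pushes to any host in that range then skip certificate verification.
+docker push 10.1.0.5:5000/myimage
+```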
+
+## 1.3.1 (2014-10-28)
+
+#### Security
+* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified
+
+#### Runtime
+- Fix issue where volumes would not be shared
+
+#### Client
+- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false`
+- Fix docker run output to non-TTY stdout
+
+#### Builder
+- Fix escaping `$` for environment variables
+- Fix issue with lowercase `onbuild` Dockerfile instruction
+- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
+
+## 1.3.0 (2014-10-14)
+
+#### Notable features since 1.2.0
++ Docker `exec` allows you to run additional processes inside existing containers (see the example below)
++ Docker `create` gives you the ability to create a container via the CLI without executing a process
++ `--security-opt` options to allow users to customize container labels and apparmor profiles
++ Docker `ps` filters
- Wildcard support to COPY/ADD
++ Move production URLs to get.docker.com from get.docker.io
++ Allocate IP address on the bridge inside a valid CIDR
++ Use drone.io for PR and CI testing
++ Ability to set up an official registry mirror
++ Ability to save multiple images with docker `save`
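+
+A short sketch of the two headline commands; the container name and image are hypothetical:
+
+```bash
+# Create a container without starting its process, then start it later.
+docker create --name web nginx
+docker start web
+
+# Run an additional process inside the already-running container.
+docker exec -it web bash
+```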
+
+## 1.2.0 (2014-08-20)
+
+#### Runtime
++ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime
++ Auto-restart containers using policies
++ Use /var/lib/docker/tmp for large temporary files
++ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want
++ `--device` to use devices in containers
+
+#### Client
++ `docker search` on private registries
++ Add `exited` filter to `docker ps --filter`
+* `docker rm -f` now kills instead of stopping
++ Support for IPv6 addresses in `--dns` flag
+
+#### Proxy
++ Proxy instances in separate processes
+* Small bug fix on UDP proxy
+
+## 1.1.2 (2014-07-23)
+
+#### Runtime
++ Fix port allocation for existing containers
++ Fix container restart on daemon restart
+
+#### Packaging
++ Fix /etc/init.d/docker issue on Debian
+
+## 1.1.1 (2014-07-09)
+
+#### Builder
+* Fix issue with ADD
+
+## 1.1.0 (2014-07-03)
+
+#### Notable features since 1.0.1
++ Add `.dockerignore` support
++ Pause containers during `docker commit`
++ Add `--tail` to `docker logs`
+
+#### Builder
++ Allow a tar file as context for `docker build`
+* Fix issue with whitespace and multi-line instructions in `Dockerfiles`
+
+#### Runtime
+* Overall performance improvements
+* Allow `/` as source of `docker run -v`
+* Fix port allocation
+* Fix bug in `docker save`
+* Add links information to `docker inspect`
+
+#### Client
+* Improve command line parsing for `docker commit`
+
+#### Remote API
+* Improve status code for the `start` and `stop` endpoints
+
+## 1.0.1 (2014-06-19)
+
+#### Notable features since 1.0.0
+* Enhance security for the LXC driver
+
+#### Builder
+* Fix `ONBUILD` instruction passed to grandchildren
+
+#### Runtime
+* Fix events subscription
+* Fix /etc/hostname file with host networking
+* Allow `-h` and `--net=none`
+* Fix issue with hotplug devices in `--privileged`
+
+#### Client
+* Fix artifacts with events
+* Fix a panic with empty flags
+* Fix `docker cp` on Mac OS X
+
+#### Miscellaneous
+* Fix compilation on Mac OS X
+* Fix several races
+
+## 1.0.0 (2014-06-09)
+
+#### Notable features since 0.12.0
+* Production support
+
+## 0.12.0 (2014-06-05)
+
+#### Notable features since 0.11.0
+* 40+ various improvements to stability, performance and usability
+* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file
+* Inherit file permissions from the host on `ADD`
+* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer
+* The `images` command has a `-f`/`--filter` option to filter the list of images
+* Add `--force-rm` to clean up after a failed build
+* Standardize JSON keys in Remote API to CamelCase
+* Pulling from a `docker run` now assumes the `latest` tag if not specified
+* Enhance security on Linux capabilities and device nodes
+
+## 0.11.1 (2014-05-07)
+
+#### Registry
+- Fix push and pull to private registry
+
+## 0.11.0 (2014-05-07)
+
+#### Notable features since 0.10.0
+
+* SELinux support for mount and process labels
+* Linked containers can be accessed by hostname
+* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces (see the example below)
+* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
+* Logs can now be returned with an optional timestamp
+* Docker now works with registries that support SHA-512
+* Multiple registry endpoints are supported to allow registry mirrors
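+
+A sketch of the `--net` modes described above; the container name is hypothetical:
+
+```bash
+# Share the host's network stack instead of an isolated one.
+docker run --net=host busybox ip addr
+
+# Join the network namespace of an existing container.
+docker run --net=container:web busybox ip addr
+```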
+
+## 0.10.0 (2014-04-08)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+- Follow symlinks inside container's root for ADD build instructions.
+- Fix EXPOSE caching.
+
+#### Documentation
+- Add the new options of `docker ps` to the documentation.
+- Add the options of `docker restart` to the documentation.
+- Update daemon docs and help messages for --iptables and --ip-forward.
+- Updated apt-cacher-ng docs example.
+- Remove duplicate description of --mtu from docs.
+- Add missing -t and -v for `docker images` to the docs.
+- Add fixes to the cli docs.
+- Update libcontainer docs.
+- Update images in docs to remove references to AUFS and LXC.
+- Update the nodejs_web_app in the docs to use the new epel RPM address.
+- Fix external link on security of containers.
+- Update remote API docs.
+- Add image size to history docs.
+- Be explicit about binding to all interfaces in redis example.
+- Document DisableNetwork flag in the 1.10 remote api.
+- Document that `--lxc-conf` is lxc only.
+- Add chef usage documentation.
+- Add example for an image with multiple tags for `docker load`.
+- Explain what `docker run -a` does in the docs.
+
+#### Contrib
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more.
+- Add check-config script to contrib.
+- Fix fish shell completion.
+
+#### Hack
+* Clean up "go test" output from "make test" to be much more readable/scannable.
+* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+- Include contributed completions in Ubuntu PPA.
++ Add cli integration tests.
+* Add tweaks to the hack scripts to make them simpler.
+
+#### Remote API
++ Add TLS auth support for API.
+* Move git clone from daemon to client.
+- Fix content-type detection in docker cp.
+* Split API into 2 go packages.
+
+#### Runtime
+* Support hairpin NAT without going through Docker server.
+- devicemapper: succeed immediately when removing non-existing devices.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping).
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- devicemapper: ensure we shut down thin pool cleanly.
+- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
+- devicemapper: avoid AB-BA deadlock.
+- devicemapper: make shutdown better/faster.
+- improve alpha sorting in mflag.
+- Remove manual http cookie management because the cookiejar is being used.
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Add FreeBSD support for the client.
+- Merge auth package into registry.
+- Add deprecation warning for -t on `docker pull`.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+- Fix attach exit on darwin.
+- Improve deprecation message.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Only unshare the mount namespace for execin.
+- Merge existing config when committing.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Mount cgroups automatically if they're not mounted already.
+- Use mock for search tests.
+- Update to double-dash everywhere.
+- Move .dockerenv parsing to lxc driver.
+- Move all bind-mounts in the container inside the namespace.
+- Don't use separate bind mount for container.
+- Don't kill by pid for other drivers.
+- Add initial logging to libcontainer.
+* Sort by port in `docker ps`.
+- Move networking drivers into runtime top level package.
++ Add --no-prune to `docker rmi`.
++ Add time since exit in `docker ps`.
+- graphdriver: add build tags.
+- Prevent allocation of previously allocated ports & improve port allocation.
+* Add support for --since/--before in `docker ps`.
+- Clean up container stop.
++ Add support for configurable dns search domains.
+- Add support for relative WORKDIR instructions.
+- Add --output flag for docker save.
+- Remove duplication of DNS entries in config merging.
+- Add cpuset.cpus to cgroups and native driver options.
+- Remove docker-ci.
+- Promote btrfs. btrfs is no longer considered experimental.
+- Add --input flag to `docker load`.
+- Return error when existing bridge doesn't match IP address.
+- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
+- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
+- Add systemd implementation of cgroups and make containers show up as systemd units.
+- Fix commit and import when no repository is specified.
+- Remount /var/lib/docker as --private to fix scaling issue.
+- Use the environment's proxy when pinging the remote registry.
+- Reduce error level from harmless errors.
+* Allow --volumes-from to be individual files.
+- Fix expanding buffer in StdCopy.
+- Set error regardless of attach or stdin. This fixes #3364.
+- Add support for --env-file to load environment variables from files (see the example below).
+- Symlink /etc/mtab and /proc/mounts.
+- Allow pushing a single tag.
+- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
+- Don't throw error when starting an already running container.
+- Fix dynamic port allocation limit.
+- Remove setupDev from libcontainer.
+- Add API version to `docker version`.
+- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
+- Fix --volumes-from mount failure.
+- Allow non-privileged containers to create device nodes.
+- Skip login tests because of external dependency on a hosted service.
+- Deprecate `docker images --tree` and `docker images --viz`.
+- Deprecate `docker insert`.
+- Include base abstraction for apparmor. This fixes some apparmor-related problems on Ubuntu 14.04.
+- Add specific error message when hitting 401 over HTTP on push.
+- Fix absolute volume check.
+- Remove volumes-from from the config.
+- Move DNS options to hostconfig.
+- Update the apparmor profile for libcontainer.
+- Add deprecation notice for `docker commit -run`.
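+
+A minimal sketch of `--env-file`; the file name and variables are hypothetical:
+
+```bash
+# Each KEY=VALUE line becomes an environment variable in the container.
+cat > app.env <<'EOF'
+DB_HOST=db.example.com
+DB_PORT=5432
+EOF
+
+docker run --env-file app.env busybox env
+```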
+
+## 0.9.1 (2014-03-24)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+
+#### Documentation
+- Fix external link on security of containers.
+
+#### Contrib
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+
+#### Hack
+- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+
+#### Remote API
+- Fix content-type detection in `docker cp`.
+
+#### Runtime
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Only unshare the mount namespace for execin.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Merge existing config when committing.
+- Fix panic in monitor.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Improve deprecation message.
+- Fix attach exit on darwin.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping).
+- devicemapper: succeed immediately when removing non-existing devices.
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+
+## 0.9.0 (2014-03-10)
+
+#### Builder
+- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build.
+- Add error to docker build --rm. This adds missing error handling.
+- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers.
+- Make `--rm` the default for `docker build`.
+
+#### Documentation
+- Download the docker client binary for Mac over https.
+- Update the titles of the install instructions & descriptions.
+* Add instructions for upgrading boot2docker.
+* Add port forwarding example in OS X install docs.
+- Attempt to disentangle repository and registry.
+- Update docs to explain more about `docker ps`.
+- Update sshd example to use a Dockerfile.
+- Rework some examples, including the Python examples.
+- Update docs to include instructions for a container's lifecycle.
+- Update docs documentation to discuss the docs branch.
+- Don't skip cert check for an example & use HTTPS.
+- Bring back the memory and swap accounting section which was lost when the kernel page was removed.
+- Explain DNS warnings and how to fix them on systems running and using a local nameserver.
+
+#### Contrib
+- Add Tanglu support for mkimage-debootstrap.
+- Add SteamOS support for mkimage-debootstrap.
+
+#### Hack
+- Get package coverage when running integration tests.
+- Remove the Vagrantfile. This is being replaced with boot2docker.
+- Fix tests on systems where aufs isn't available.
+- Update packaging instructions and remove the dependency on lxc.
+
+#### Remote API
+* Move code specific to the API to the api package.
+- Fix header content type for the API. Makes all endpoints use proper content type.
+- Fix registry auth & remove ping calls from CmdPush and CmdPull.
+- Add newlines to the JSON stream functions.
+
+#### Runtime
+* Do not ping the registry from the CLI. All requests to registries flow through the daemon.
+- Check for nil information return in the lxc driver. This fixes panics with older lxc versions.
+- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently.
+- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device.
+* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks.
+- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal.
+- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`.
+- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp.
+- Fix `--run` in `docker commit`. This makes `docker commit --run` work again.
+- Fix custom bridge related options. This makes custom bridges work again.
++ Bind-mount the PTY as container console. This allows tmux/screen to run.
++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel.
++ Add native exec driver which uses libcontainer and make it the default exec driver.
+- Add support for handling extended attributes in archives.
+* Set the container MTU to be the same as the host MTU.
++ Add simple sha256 checksums for layers to speed up `docker push`.
+* Improve kernel version parsing.
+* Allow flag grouping (`docker run -it`) (see the example below).
+- Remove chroot exec driver.
+- Fix divide by zero to fix panic.
+- Rewrite `docker rmi`.
+- Fix docker info with lxc 1.0.0.
+- Fix fedora tty with apparmor.
+* Don't always append env vars, replace defaults with vars from config.
+* Fix a goroutine leak.
+* Switch to Go 1.2.1.
+- Fix unique constraint error checks.
+* Handle symlinks for Docker's data directory and for TMPDIR.
+- Add deprecation warnings for flags (-flag is deprecated in favor of --flag)
+- Add apparmor profile for the native execution driver.
+* Move system specific code from archive to pkg/system.
+- Fix duplicate signal for `docker run -i -t` (issue #3336).
+- Return correct process pid for lxc.
+- Add a -G option to specify the group which unix sockets belong to.
++ Add `-f` flag to `docker rm` to force removal of running containers.
++ Kill ghost containers and restart all ghost containers when the docker daemon restarts.
++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk.
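+
+A brief illustration of the flag grouping and forced removal added in this release; a sketch, not taken from the release docs:
+
+```bash
+# Single-letter flags can now be grouped: -it is equivalent to -i -t.
+docker run -it busybox sh
+
+# Force-remove the most recently created container, even if it is still running.
+docker rm -f $(docker ps -l -q)
+```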
+
+## 0.8.1 (2014-02-18)
+
+#### Builder
+
+- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
+- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system
+- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported
+
+#### Documentation
+
+* Update issue filing instructions
+* Warn against the use of symlinks for Docker's storage folder
+* Replace the Firefox example with an IceWeasel example
+* Rewrite the PostgreSQL example using a Dockerfile and add more details to it
+* Improve the OS X documentation
+
+#### Remote API
+
+- Fix broken images API for version less than 1.7
+- Use the right encoding for all API endpoints which return JSON
+- Move remote api client to api/
+- Queue calls to the API using generic socket wait
+
+#### Runtime
+
+- Fix the use of custom settings for bridges and custom bridges
+- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures
+- Remove two panics which could make Docker crash in some situations
+- Don't ping registry from the CLI client
+- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks
+- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration
+- Remove directory when removing devicemapper device. This cleans up leftover mount directories
+- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration
+- Ensure `docker cp` stream is closed properly
+- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
+- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port
++ Bind-mount the PTY as container console. This allows tmux and screen to run in a container
+- Clean up archive closing. This fixes and improves archive handling
+- Fix engine tests on systems where temp directories are symlinked
+- Add test methods for save and load
+- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart
+- Support submodules when building from a GitHub repository
+- Quote volume path to allow spaces
+- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs
+
+## 0.8.0 (2014-02-04)
+
+#### Notable features since 0.7.0
+
+* Images and containers can be removed much faster
+* Building an image from source with docker build is now much faster
+* The Docker daemon starts and stops much faster
+* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
+* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker less likely to crash and reduces the memory footprint of many common operations
+* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
+* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example, the storage driver API now allows Docker to do reference counting on mounts created by the drivers
+* With the ongoing changes to the networking and execution subsystems of Docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages
+* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change
+
+* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed
+* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build (see the example below)
+* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
+* Docker is officially supported on Mac OS X
+* The Docker daemon supports systemd socket activation
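+
+A sketch of the ONBUILD trigger flow; the image names are hypothetical:
+
+```bash
+# Base image whose ONBUILD trigger fires only in descendant builds.
+cat > Dockerfile <<'EOF'
+FROM busybox
+ONBUILD ADD . /app/src
+EOF
+docker build -t onbuild-base .
+
+# Building FROM that base runs the deferred ADD against the child's context.
+mkdir child && printf 'FROM onbuild-base\n' > child/Dockerfile
+docker build -t onbuild-child child
+```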
+
+## 0.7.6 (2014-01-14)
+
+#### Builder
+
+* Do not follow symlink outside of build context
+
+#### Runtime
+
+- Remount bind mounts when ro is specified
+* Use https for fetching docker version
+
+#### Other
+
+* Inline the test.docker.io fingerprint
+* Add ca-certificates to packaging documentation
+
+## 0.7.5 (2014-01-09)
+
+#### Builder
+
+* Disable compression for build. More space usage but a much faster upload
+- Fix ADD caching for certain paths
+- Do not compress archive from git build
+
+#### Documentation
+
+- Fix error in GROUP add example
+* Make sure the GPG fingerprint is inline in the documentation
+* Give more specific advice on setting up signing of commits for DCO
+
+#### Runtime
+
+- Fix misspelled container names
+- Do not add hostname when networking is disabled
+* Return most recent image from the cache by date
+- Return all errors from docker wait
+* Add Content-Type Header "application/json" to GET /version and /info responses
+
+#### Other
+
+* Update DCO to version 1.1
++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name
+* Update Travis to check for new 1.1 DCO version
+
+## 0.7.4 (2014-01-07)
+
+#### Builder
+
+- Fix ADD caching issue with `.`-prefixed path
+- Fix docker build on devicemapper by reverting sparse file tar option
+- Fix issue with file caching and prevent wrong cache hit
+* Use same error handling while unmarshalling CMD and ENTRYPOINT
+
+#### Documentation
+
+* Simplify and streamline Amazon Quickstart
+* Install instructions use unprefixed fedora image
+* Update instructions for mtu flag for Docker on GCE
++ Add Ubuntu Saucy to installation
+- Fix for wrong version warning on master instead of latest
+
+#### Runtime
+
+- Only get the image's rootfs when we need to calculate the image size
+- Correctly handle unmapping UDP ports
+* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build
+- Fix login message to say pull instead of push
+- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN (see the example below)
+* Make a blank -H option default to the same value as when no -H was sent
+* Extract cgroups utilities to own submodule
+
+#### Other
+
++ Add Travis CI configuration to validate DCO and gofmt requirements
++ Add Developer Certificate of Origin Text
+* Upgrade VBox Guest Additions
+* Check standalone header when pinging a registry server
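+
+A quick illustration of the stream-based transfer that the `docker load` help now describes:
+
+```bash
+# docker save writes an image tarball to stdout; docker load reads one from stdin.
+docker save busybox > busybox.tar
+docker load < busybox.tar
+```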
+
+## 0.7.3 (2014-01-02)
+
+#### Builder
+
++ Update ADD to use the image cache, based on a hash of the added content
+* Add error message for empty Dockerfile
+
+#### Documentation
+
+- Fix outdated link to the "Introduction" on www.docker.io
++ Update the docs to get wider when the screen does
+- Add information about needing to install LXC when using raw binaries
+* Update Fedora documentation to disentangle the docker and docker.io conflict
+* Add a note about using the new `-mtu` flag in several GCE zones
++ Add FrugalWare installation instructions
++ Add a more complete example of `docker run`
+- Fix API documentation for creating and starting Privileged containers
+- Add missing "name" parameter documentation on "/containers/create"
+* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration
+- Update the 1.8 API documentation with some additions that were added to the docs for 1.7
+
+#### Hack
+
+- Add missing libdevmapper dependency to the packagers documentation
+* Update minimum Go requirement to a hard line at Go 1.2+
+* Many minor improvements to the Vagrantfile
++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location)
++ Add coverprofile generation reporting
+- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually
+* Update Dockerfile to be more canonical and have less spurious warnings during build
+- Fix some miscellaneous `docker pull` progress bar display issues
+* Migrate more miscellaneous packages under the "pkg" folder
+* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
+* Reorganize syntax highlighting files under a common "contrib/syntax" directory
+* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
+* Add support for container names in bash completion
+
+#### Packaging
+
++ Add an official Docker client binary for Darwin (Mac OS X)
+* Remove empty "Vendor" string and add "License" on deb package
++ Add a stubbed version of "/etc/default/docker" in the deb package
+
+#### Runtime
+
+* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
+- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
+* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
+- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
+* Update container name validation to include '.'
+- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
+* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
+* Update to use proper box-drawing characters everywhere in `docker images -tree`
+* Move MTU setting from LXC configuration to directly use netlink
+* Add `-S` option to external tar invocation for more efficient sparse file handling
++ Add arch/os info to User-Agent string, especially for registry requests
++ Add `-mtu` option to Docker daemon for configuring MTU
+- Fix `docker build` to exit with a non-zero exit code on error
++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation (see the example below)
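+
+A minimal sketch of `DOCKER_HOST`; the address is illustrative (4243 was the conventional TCP port of this era):
+
+```bash
+# Set the daemon address once...
+export DOCKER_HOST=tcp://127.0.0.1:4243
+docker ps
+
+# ...instead of repeating -H on every invocation.
+docker -H tcp://127.0.0.1:4243 ps
+```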
+
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move api endpoints to the job api
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR (see the example below)
+- Allow bridge creation when ipv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when symlink is present in the image
+- Prevent deletion of image if ANY container is depending on it even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjust handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping images small
+* Use https to get the install script
+* Remove vendored dotcloud/tar now that Go 1.2 has been released
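+
+A sketch of the new `-bip` flag; the CIDR is illustrative:
+
+```bash
+# Give the docker0 bridge a fixed address and netmask instead of an auto-picked range.
+docker -d -bip 10.20.0.1/16
+```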
+
+## 0.7.1 (2013-12-05)
+
+#### Documentation
+
++ Add @SvenDowideit as documentation maintainer
++ Add links example
++ Add documentation regarding ambassador pattern
++ Add Google Cloud Platform docs
++ Add dockerfile best practices
+* Update doc for RHEL
+* Update doc for registry
+* Update Postgres examples
+* Update doc for Ubuntu install
+* Improve remote api doc
+
+#### Runtime
+
++ Add hostconfig to docker inspect
++ Implement `docker logs -f` to stream logs
++ Add env variable to disable kernel version warning
++ Add -format to `docker inspect`
++ Support bind-mount for files
+- Fix bridge creation on RHEL
+- Fix image size calculation
+- Make sure iptables are called even if the bridge already exists
+- Fix issue with stderr only attach
+- Remove init layer when destroying a container
+- Fix same port binding on different interfaces
+- `docker build` now returns the correct exit code
+- Fix `docker port` to display correct port
+- `docker build` now checks that the Dockerfile exists client-side
+- `docker attach` now returns the correct exit code
+- Remove the name entry when the container does not exist
+
+#### Registry
+
+* Improve progress bars, add ETA for downloads
+* Simultaneous pulls now wait for the first to finish instead of failing
+- Tag only the top-layer image when pushing to registry
+- Fix issue with offline image transfer
+- Fix issue preventing using ':' in password for registry
+
+#### Other
+
++ Add pprof handler for debug
++ Create a Makefile
+* Use stdlib tar that now includes fix
+* Improve make.sh test script
+* Handle SIGQUIT on the daemon
+* Disable verbose during tests
+* Upgrade to go1.2 for official build
+* Improve unit tests
+* The test suite now runs all tests even if one fails
+* Refactor C in Go (Devmapper)
+- Fix OSX compilation
+
+## 0.7.0 (2013-11-25)
+
+#### Notable features since 0.6.0
+
+* Storage drivers: choose from aufs, device-mapper, or vfs.
+* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
+* Links: compose complex software stacks by connecting containers to each other (see the example below).
+* Container naming: organize your containers by giving them memorable names.
+* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
+* Offline transfer: push and pull images to the filesystem without losing information.
+* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
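+
+A sketch of naming and linking, using the single-dash flags of the 0.7 era; the names and images are hypothetical:
+
+```bash
+# Give the database container a memorable name.
+docker run -d -name db mydb-image
+
+# Link it into a second container; the link's address shows up there as environment variables.
+docker run -d -name web -link db:db myweb-image
+```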
+
+## 0.6.7 (2013-11-21)
+
+#### Runtime
+
+* Improve stability, fix some race conditions
+* Skip mounted volumes when deleting a container's volumes.
+* Fix layer size computation: handle hard links correctly
+* Use the work path for docker cp CONTAINER:PATH
+* Fix tmp dir never being cleaned up
+* Speed up docker ps
+* More informative error message on name collisions
+* Fix nameserver regex
+* Always return long IDs
+* Fix container restart race condition
+* Keep published ports on docker stop; docker start
+* Fix container networking on Fedora
+* Correctly express "any address" to iptables
+* Fix network setup when reconnecting to ghost container
+* Prevent deletion if image is used by a running container
+* Lock around read operations in graph
+
+#### RemoteAPI
+
+* Return full ID on docker rmi
+
+#### Client
+
++ Add -tree option to images
++ Offline image transfer
+* Exit with status 2 on usage error and display usage on stderr
+* Do not forward SIGCHLD to container
+* Use string timestamp for docker events -since
+
+#### Other
+
+* Update to go 1.2rc5
++ Add /etc/default/docker support to upstart
+
+## 0.6.6 (2013-11-06)
+
+#### Runtime
+
+* Ensure container name on register
+* Fix regression in /etc/hosts
++ Add lock around write operations in graph
+* Check if port is valid
+* Fix restart runtime error with ghost container networking
++ Add some more colors and animals to increase the pool of generated names
+* Fix issues in docker inspect
++ Escape apparmor confinement
++ Set environment variables using a file.
+* Prevent docker insert from erasing something
++ Prevent DNS server conflicts in CreateBridgeIface
++ Validate bind mounts on the server side
++ Use parent image config in docker build
+
+#### Client
+
++ Add -P flag to publish all exposed ports (see the example below)
++ Add -notrunc and -q flags to docker history
+* Fix docker commit, tag and import usage
++ Add stars, trusted builds and library flags in docker search
+* Fix docker logs with tty
+
+#### RemoteAPI
+
+* Make /events API send headers immediately
+* Do not split last column docker top
++ Add size to history
+
+#### Other
+
++ Contrib: Desktop integration. Firefox usecase.
++ Dockerfile: bump to go1.2rc3
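+
+A short sketch of `-P`; the image name is hypothetical:
+
+```bash
+# -P publishes every port the image EXPOSEs onto random high host ports.
+docker run -d -P myimage
+
+# The assigned host ports appear in the PORTS column.
+docker ps
+```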
+
+## 0.6.5 (2013-10-29)
+
+#### Runtime
+
++ Containers can now be named
++ Containers can now be linked together for service discovery
++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
++ Automatically start crashed containers after a reboot
++ Expose IP, port, and proto as separate environment vars for container links
+* Allow ports to be published to specific ips
+* Prohibit inter-container communication by default
+- Ignore ErrClosedPipe for stdin in Container.Attach
+- Remove unused field kernelVersion
+* Fix issue when mounting subdirectories of /mnt in container
+- Fix untag during removal of images
+* Check return value of syscall.Chdir when changing working directory inside dockerinit
+
+#### Client
+
+- Only pass stdin to hijack when needed to avoid closed pipe errors
+* Use less reflection in command-line method invocation
+- Monitor the tty size after starting the container, not prior
+- Remove useless os.Exit() calls after log.Fatal
+
+#### Hack
+
++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
+* Add -p option to invoke debootstrap with http_proxy
+- Update install.sh with $sh_c to get sudo/su for modprobe
+* Update all the mkimage scripts to use --numeric-owner as a tar argument
+* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
+
+#### Other
+
+* Documentation: Fix the flags for nc in example
+* Testing: Remove warnings and prevent mount issues
+- Testing: Change logic for tty resize to avoid warning in tests
+- Builder: Fix race condition in docker build with verbose output
+- Registry: Fix content-type for PushImageJSONIndex method
+* Contrib: Improve helper tools to generate debian and Arch linux server images
+
+## 0.6.4 (2013-10-16)
+
+#### Runtime
+
+- Add cleanup of container when Start() fails
+* Add better comments to utils/stdcopy.go
+* Add utils.Errorf for error logging
++ Add -rm to docker run for removing a container on exit
+- Remove error messages which are not actually errors
+- Fix `docker rm` with volumes
+- Fix some error cases where a HTTP body might not be closed
+- Fix panic with wrong dockercfg file
+- Fix the attach behavior with -i
+* Record termination time in state.
+- Use empty string so TempDir uses the OS's temp dir automatically
+- Make sure to close the network allocators
++ Autorestart containers by default
+* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)`
+* lxc: Allow set_file_cap capability in container
+- Move run -rm to the cli only
+* Split stdout stderr
+* Always create a new session for the container
+
+#### Testing
+
+- Add aggregated docker-ci email report
+- Add cleanup to remove leftover containers
+* Add nightly release to docker-ci
+* Add more tests around auth.ResolveAuthConfig
+- Remove a few errors in tests
+- Catch errClosing error when TCP and UDP proxies are terminated
+* Only run certain tests with TESTFLAGS='-run TestName' make.sh
+* Prevent docker-ci from testing closing PRs
+* Replace panic with log.Fatal in tests
+- Increase TestRunDetach timeout
+
+#### Documentation
+
+* Add initial draft of the Docker infrastructure doc
+* Add devenvironment link to CONTRIBUTING.md
+* Add `apt-get install curl` to Ubuntu docs
+* Add explanation for export restrictions
+* Add .dockercfg doc
+* Remove Gentoo install notes about #1422 workaround
+* Fix help text for -v option
+* Fix Ping endpoint documentation
+- Fix parameter names in docs for ADD command
+- Fix ironic typo in changelog
+* Various command fixes in postgres example
+* Document how to edit and release docs
+- Minor updates to `postgresql_service.rst`
+* Clarify LGTM process to contributors
+- Corrected error in the package name
+* Document what `vagrant up` is actually doing
++ Improve doc search results
+* Cleanup whitespace in API 1.5 docs
+* Use angle brackets in MAINTAINER example email
+* Update archlinux.rst
++ Changes to a new style for the docs. Includes version switcher.
+* Formatting, add information about multiline json
+* Improve registry and index REST API documentation
+- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
+* Update Gentoo installation documentation now that we're in the portage tree proper
+* Cleanup and reorganize docs and tooling for contributors and maintainers
+- Minor spelling correction of protocoll -> protocol
+
+#### Contrib
+
+* Add vim syntax highlighting for Dockerfiles from @honza
+* Add mkimage-arch.sh
+* Reorganize contributed completion scripts to add zsh completion
+
+#### Hack
+
+* Add vagrant user to the docker group
+* Add proper bash completion for "docker push"
+* Add xz utils as a runtime dep
+* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
+* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
++ Add @tianon to hack/MAINTAINERS
+* Improve network performance for VirtualBox
+* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
+- Fix contrib/mkimage-debian.sh apt caching prevention
++ Add Dockerfile.tmLanguage to contrib
+* Configure FPM to make /etc/init/docker.conf a config file
+* Enable SSH Agent forwarding in Vagrant VM
+* Several small tweaks/fixes for contrib/mkimage-debian.sh
+
+#### Other
+
+- Builder: Abort build if mergeConfig returns an error and fix duplicate error message
+- Packaging: Remove deprecated packaging directory
+- Registry: Use correct auth config when logging in.
+- Registry: Fix the error message so it is the same as the regex
+
+## 0.6.3 (2013-09-23)
+
+#### Packaging
+
+* Add 'docker' group on install for ubuntu package
+* Update tar vendor dependency
+* Download apt key over HTTPS
+
+#### Runtime
+
+- Only copy and change permissions on non-bindmount volumes
+* Allow multiple volumes-from
+- Fix HTTP imports from STDIN
+
+#### Documentation
+
+* Update section on extracting the docker binary after build
+* Update development environment docs for new build process
+* Remove 'base' image from documentation
+
+#### Other
+
+- Client: Fix detach issue
+- Registry: Update regular expression to match index
+
+## 0.6.2 (2013-09-17)
+
+#### Runtime
+
++ Add domainname support
++ Implement image filtering with path.Match
+* Remove unnecessary warnings
+* Remove os/user dependency
+* Only mount the hostname file when the config exists
+* Handle signals within the `docker login` command
+- UID and GID are now also applied to volumes
+- `docker start` now sets an error code upon error
+- `docker run` now sets the same error code as the process started
+
+#### Builder
+
++ Add -rm option in order to remove intermediate containers
+* Allow multiline for the RUN instruction
+
+#### Registry
+
+* Implement login with private registry
+- Fix push issues
+
+#### Other
+
++ Hack: Vendor all dependencies
+* Remote API: Bump to v1.5
+* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
+* Documentation: General improvements
+
+## 0.6.1 (2013-08-23)
+
+#### Registry
+
+* Pass "meta" headers in API calls to the registry
+
+#### Packaging
+
+- Use correct upstart script with new build tool
+- Use libffi-dev, don't build it from sources
+- Remove duplicate mercurial install command
+
+## 0.6.0 (2013-08-22)
+
+#### Runtime
+
++ Add lxc-conf flag to allow custom lxc options
++ Add an option to set the working directory
+* Add Image name to LogEvent tests
++ Add -privileged flag and relevant tests, docs, and examples (see the example below)
+* Add websocket support to /container/<id>/attach/ws
+* Add warning when net.ipv4.ip_forwarding = 0
+* Add hostname to environment
+* Add last stable version in `docker version`
+- Fix race conditions in parallel pull
+- Fix Graph ByParent() to generate list of child images per parent image.
+- Fix typo: fmt.Sprint -> fmt.Sprintf
+- Fix small \n error in docker build
+* Fix to "Inject dockerinit at /.dockerinit"
+* Fix #910: print user name to docker info output
+* Use Go 1.1.2 for dockerbuilder
+* Use ranged for loop on channels
+- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+- Improve CMD, ENTRYPOINT, and attach docs.
+- Improve connect message with socket error
+- Load authConfig only when needed and fix useless WARNING
+- Show tag used when image is missing
+* Apply volumes-from before creating volumes
+- Make docker run handle SIGINT/SIGTERM
+- Prevent crash when .dockercfg not readable
+- Install script should be fetched over https, not http.
+* API, issue 1471: Use groups for socket permissions
+- Correctly detect IPv4 forwarding
+* Mount /dev/shm as a tmpfs
+- Switch from http to https for get.docker.io
+* Let userland proxy handle container-bound traffic
+* Update the Docker CLI to specify a value for the "Host" header.
+- Change network range to avoid conflict with EC2 DNS
+- Reduce connect and read timeout when pinging the registry
+* Parallel pull
+- Handle ip route showing mask-less IP addresses
+* Allow ENTRYPOINT without CMD
+- Always consider localhost as a domain name when parsing the FQN repos name
+* Refactor checksum
+
+#### Documentation
+
+* Add MongoDB image example
+* Add instructions for creating and using the docker group
+* Add sudo to examples and installation to documentation
+* Add ufw doc
+* Add a reference to ps -a
+* Add information about Docker's high level tools over LXC.
+* Fix typo in docs for docker run -dns
+* Fix a typo in the ubuntu installation guide
+* Fix to docs regarding adding docker groups
+* Update default -H docs
+* Update readme with dependencies for building
+* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
+* PostgreSQL service example in documentation
+* Suggest installing linux-headers by default.
+* Change the twitter handle
+* Clarify Amazon EC2 installation
+* 'Base' image is deprecated and should no longer be referenced in the docs.
+* Move note about officially supported kernel
+- Solved the logo being squished in Safari
+
+#### Builder
+
++ Add USER instruction to Dockerfile
++ Add workdir support for the Buildfile
+* Add a no-cache option for docker build
+- Fix docker build and docker events output
+- Only count known instructions as build steps
+- Make sure ENV instruction within build performs a commit each time
+- Forbid certain paths within docker build ADD
+- Repository name (and optionally a tag) in build usage
+- Make sure ADD will create everything in 0755
+
+#### Remote API
+
+* Sort Images by most recent creation date.
+* Reworking opaque requests in registry module
+* Add image name in /events
+* Use mime pkg to parse Content-Type
+* 650 http utils and user agent field
+
+#### Hack
+
++ Bash Completion: Limit commands to containers of a relevant state
+* Add docker dependencies coverage testing into docker-ci
+
+#### Packaging
+
++ Docker-brew 0.5.2 support and memory footprint reduction
+* Add new docker dependencies into docker-ci
+- Revert "docker.upstart: avoid spawning a `sh` process"
++ Docker-brew and Docker standard library
++ Release docker with docker
+* Fix the upstart script generated by get.docker.io
+* Enable the docs to generate manpages.
+* Revert Bind daemon to 0.0.0.0 in Vagrant.
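+
+A one-line sketch of the new flag, in its 0.6-era single-dash spelling:
+
+```bash
+# Run with full device access and capabilities; use sparingly.
+docker run -privileged busybox ls /dev
+```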
+
+#### Registry
+
+* Improve auth push
+* Registry unit tests + mock registry
+
+#### Tests
+
+* Improve TestKillDifferentUser to prevent timeout on buildbot
+- Fix typo in TestBindMounts (runContainer called without image)
+* Improve TestGetContainersTop so it does not rely on sleep
+* Relax the lo interface test to allow iface index != 1
+* Add registry functional test to docker-ci
+* Add some tests in server and utils
+
+#### Other
+
+* Contrib: bash completion script
+* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
+* Don't read from stdout when only attached to stdin
+
+## 0.5.3 (2013-08-13)
+
+#### Runtime
+
+* Use docker group for socket permissions
+- Spawn shell within upstart script
+- Handle ip route showing mask-less IP addresses
+- Add hostname to environment
+
+#### Builder
+
+- Make sure ENV instruction within build performs a commit each time
+
+## 0.5.2 (2013-08-08)
+
+* Builder: Forbid certain paths within docker build ADD
+- Runtime: Change network range to avoid conflict with EC2 DNS
+* API: Change daemon to listen on unix socket by default
+
+## 0.5.1 (2013-07-30)
+
+#### Runtime
+
++ Add `ps` args to `docker top`
++ Add support for container ID files (pidfile like)
++ Add container=lxc in default env
++ Support networkless containers with `docker run -n` and `docker -d -b=none`
+* Stdout/stderr logs are now stored in the same file as JSON
+* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+* Change .dockercfg format to json and support multiple auth remote
+- Do not override volumes from config
+- Fix issue with EXPOSE override
+
+#### API
+
++ Docker client now sets useragent (RFC 2616)
++ Add /events endpoint
+
+#### Builder
+
++ ADD command now understands URLs
++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+- Create directories with 755 instead of 700 within ADD instruction
+
+#### Hack
+
+* Simplify unit tests with helpers
+* Improve docker.upstart event
+* Add coverage testing into docker-ci
+
+## 0.5.0 (2013-07-17)
+
+#### Runtime
+
++ List all processes running inside a container with 'docker top'
++ Host directories can be mounted as volumes with 'docker run -v'
++ Containers can expose public UDP ports (e.g. '-p 123/udp')
++ Optionally specify an exact public port (e.g. '-p 80:4500')
+* 'docker login' supports additional options
+- Don't save a container's hostname when committing an image.
+
+#### Registry
+
++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+- Fix issues when uploading images to a private registry
+
+#### Builder
+
++ ENTRYPOINT instruction sets a default binary entry point to a container
++ VOLUME instruction marks a part of the container as persistent data
+* 'docker build' displays the full output of a build by default
+
+## 0.4.8 (2013-07-01)
+
++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+- Tests: Fix issues in the test suite
+
+## 0.4.7 (2013-06-28)
+
+#### Remote API
+
+* The progress bar updates faster when downloading and uploading large files
+- Fix a bug in the optional unix socket transport
+
+#### Runtime
+
+* Improve detection of kernel version
++ Host directories can be mounted as volumes with 'docker run -b'
+- Fix an issue when only attaching to stdin
+* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
+
+#### Hack
+
+* Improve test suite and dev environment
+* Remove dependency of unit tests on 'os/user'
+
+#### Other
+
+* Registry: easier push/pull to a custom registry
++ Documentation: add terminology section
+
+## 0.4.6 (2013-06-22)
+
+- Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
+
+## 0.4.5 (2013-06-21)
+
++ Builder: 'docker build git://URL' fetches and builds a remote git repository
+* Runtime: 'docker ps -s' optionally prints container size
+* Tests: improved and simplified
+- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
+- Builder: fix a regression when using ADD with a single regular file.
+
+## 0.4.4 (2013-06-19)
+
+- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
+
+## 0.4.3 (2013-06-19)
+
+#### Builder
+
++ ADD of a local file will detect tar archives and unpack them
+* ADD improvements: use tar for copy + automatically unpack local archives
+* ADD uses tar/untar for copies instead of calling 'cp -ar'
+* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
+- Fix a bug which caused builds to fail if ADD was the first command
+* Nicer output for 'docker build'
+
+#### Runtime
+
+* Remove bsdtar dependency
+* Add unix socket and multiple -H support
+* Prevent rm of running containers
+* Use go1.1 cookiejar
+- Fix issue detaching from running TTY container
+- Forbid parallel push/pull for a single image/repo. Fixes #311
+- Fix race condition within Run command when attaching.
+
+#### Client
+
+* HumanReadable ProgressBar sizes in pull
+* Fix docker version's git commit output
+
+#### API
+
+* Send all tags on History API call
+* Add tag lookup to history command.
Fixes #882
+
+#### Documentation
+
+- Fix missing command in irc bouncer example
+
+## 0.4.2 (2013-06-17)
+
+- Packaging: Bumped version to work around an Ubuntu bug
+
+## 0.4.1 (2013-06-17)
+
+#### Remote API
+
++ Add flag to enable cross domain requests
++ Add image and container sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure DNS host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow docker run :
++ You can now specify a public port (e.g. -p 80:4500)
+* Improve image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container id before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo's name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the rest api port to Vagrantfile's port_forward
+* Upgrade to Go 1.1
+- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
+
+## 0.4.0 (2013-06-03)
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ Control Docker programmatically using a simple HTTP/json API
+
+#### Runtime
+
+* Various reliability and usability improvements
+
+## 0.3.4 (2013-05-30)
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container.
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improve install instructions.
+
+## 0.3.3 (2013-05-23)
+
+- Registry: Fix push regression
+- Various bugfixes
+
+## 0.3.2 (2013-05-09)
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
+
+## 0.3.1 (2013-05-08)
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top level command
++ Implement ENV within docker builder
+- Check the command existence prior to creation, and add unit tests for the case
+* Use any whitespace instead of tabs
+
+#### Runtime
+
++ Add go version to debug infos
+* Kernel version - don't show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top level command in order to search a repository
+- Fix pull for official images with specific tag
+- Fix issue when logging in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better.
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
+
+#### Other
+
++ Website: new high-level overview
+- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+* Packaging: ubuntu packaging; issue #510: use the golang-stable PPA package to build docker
+
+## 0.3.0 (2013-05-06)
+
+#### Runtime
+
+- Fix the command existence check
+- strings.Split may return an empty string on no match
+- Fix an index out of range crash if cgroup memory is not
+
+#### Documentation
+
+* Various improvements
++ New example: sharing data between 2 CouchDB databases
+
+#### Other
+
+* Vagrant: Use only one deb line in /etc/apt
++ Registry: Implement the new registry
+
+## 0.2.2 (2013-05-03)
+
++ Support for data volumes ('docker run -v=PATH')
++ Share data volumes between containers ('docker run -volumes-from')
++ Improve documentation
+* Upgrade to Go 1.0.3
+* Various upgrades to the dev environment for contributors
+
+## 0.2.1 (2013-05-01)
+
++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+* Improve install process on Vagrant
++ New Dockerfile operation: "maintainer"
++ New Dockerfile operation: "expose"
++ New Dockerfile operation: "cmd"
++ Contrib script to build a Debian base layer
++ 'docker -d -r': restart crashed containers at daemon startup
+* Runtime: improve test coverage
+
+## 0.2.0 (2013-04-23)
+
+- Runtime: ghost containers can be killed and waited for
+* Documentation: update install instructions
+- Packaging: fix Vagrantfile
+- Development: automate releasing binaries and ubuntu packages
++ Add a changelog
+- Various bugfixes
+
+## 0.1.8 (2013-04-22)
+
+- Dynamically detect cgroup capabilities
+- Issue stability warning on kernels <3.8
+- 'docker push' buffers on disk instead of memory
+- Fix 'docker diff' for removed files
+- Fix 'docker stop' for ghost containers
+- Fix handling of pidfile
+- Various bugfixes and stability improvements
+
+## 0.1.7 (2013-04-18)
+
+- Container ports are available on localhost
+- 'docker ps' shows allocated TCP ports
+- Contributors can run 'make hack' to start a continuous integration VM
+- Streamline ubuntu packaging & uploading
+- Various bugfixes and stability improvements
+
+## 0.1.6 (2013-04-17)
+
+- Record the author of an image with 'docker commit -author'
+
+## 0.1.5 (2013-04-17)
+
+- Disable standalone mode
+- Use a custom DNS resolver with 'docker -d -dns'
+- Detect ghost containers
+- Improve diagnosis of missing system capabilities
+- Allow disabling memory limits at compile time
+- Add debian packaging
+- Documentation: installing on Arch Linux
+- Documentation: running Redis on docker
+- Fix lxc 0.9 compatibility
+- Automatically load aufs module
+- Various bugfixes and stability improvements
+
+## 0.1.4 (2013-04-09)
+
+- Full support for TTY emulation
+- Detach from a TTY session with the escape sequence `C-p C-q`
+- Various bugfixes and stability improvements
+- Minor UI improvements
+- Automatically create our own bridge interface 'docker0'
+
+## 0.1.3 (2013-04-04)
+
+- Choose TCP frontend port with '-p :PORT'
+- Layer format is versioned
+- Major reliability improvements to the process manager
+- Various bugfixes and stability improvements
+
+## 0.1.2 (2013-04-03)
+
+- Set container hostname with 'docker run -h'
+- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
+- Various bugfixes and stability improvements
+- UI polish
+- Progress bar on push/pull
+- Use XZ compression by default
+- Make IP allocator lazy
+
+## 0.1.1 (2013-03-31)
+
+- Display shorthand IDs for convenience
+- Stabilize process management
+- Layers
can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..3ed8bf9d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,316 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! Here are instructions to get you +started. They are probably not perfect, please let us know if anything +feels wrong or incomplete. + +## Topics + +* [Security Reports](#security-reports) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-issues) +* [Build Environment](#build-environment) +* [Contribution Guidelines](#contribution-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Security Reports + +Please **DO NOT** file an issue for security related issues. Please send your +reports to [security@docker.com](mailto:security@docker.com) instead. + +## Design and Cleanup Proposals + +When considering a design proposal, we are looking for: + +* A description of the problem this design proposal solves +* An issue -- not a pull request -- that describes what you will take action on + * Please prefix your issue with `Proposal:` in the title +* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) + before reporting a new issue. You can always pair with someone if you both + have the same idea. + +When considering a cleanup task, we are looking for: + +* A description of the refactors made + * Please note any logic changes if necessary +* A pull request with the code + * Please prefix your PR's title with `Cleanup:` so we can quickly address it. + * Your pull request must remain up to date with master, so rebase as necessary. + +## Reporting Issues + +When reporting [issues](https://github.com/docker/docker/issues) on +GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc). +Please include: + +* The output of `uname -a`. +* The output of `docker version`. +* The output of `docker -D info`. + +Please also include the steps required to reproduce the problem if +possible and applicable. This information will help us review and fix +your issue faster. + +## Build Environment + +For instructions on setting up your development environment, please +see our dedicated [dev environment setup +docs](http://docs.docker.com/contributing/devenvironment/). + +## Contribution guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep Docker lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. 
However, there might be a way to implement
+that feature *on top of* Docker.
+
+### Discuss your design on the mailing list
+
+We recommend discussing your plans [on the mailing
+list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev)
+before starting to code - especially for more ambitious contributions.
+This gives other contributors a chance to point you in the right
+direction, give feedback on your design, and maybe point out if someone
+else is working on the same thing.
+
+### Create issues...
+
+Any significant improvement should be documented as [a GitHub
+issue](https://github.com/docker/docker/issues) before anybody
+starts working on it.
+
+### ...but check for existing issues first!
+
+Please take a moment to check that an issue doesn't already exist
+documenting your bug report or improvement proposal. If it does, it
+never hurts to add a quick "+1" or "I have this problem too". This will
+help prioritize the most common problems and requests.
+
+### Conventions
+
+Fork the repository and make changes on your fork in a feature branch:
+
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the
+  issue.
+- If it's a feature branch, create an enhancement issue to announce your
+  intentions, and name it XXXX-something where XXXX is the number of the issue.
+
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. Run the full test suite on
+your branch before submitting a pull request.
+
+Update the documentation when creating or modifying features. Test
+your documentation changes for clarity, concision, and correctness, as
+well as a clean documentation build. See `docs/README.md` for more
+information on building the docs and how they get released.
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plug-ins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a
+reference to all the issues that they address.
+
+Commit messages must start with a capitalized and short summary (max. 50
+chars) written in the imperative, followed by an optional, more detailed
+explanatory text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Be
+sure to post a comment after pushing. The new commits will show up in the pull
+request automatically, but the reviewers will not be notified unless you
+comment.
+
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
+feature branch to update your pull request rather than `git merge master`.
+
+Before the pull request is merged, make sure that you squash your commits into
+logical units of work using `git rebase -i` and `git push -f`. After every
+commit the test suite should be passing. Include documentation changes in the
+same commit so that a revert would remove all traces of the feature or fix.
+
+Commits that fix or close an issue should include a reference like
+`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the
+issue when merged.
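+
+As a minimal, illustrative sketch of that rebase-and-squash flow (the
+branch name `1234-fix-foo` is hypothetical; substitute your own feature
+branch):
+
+```bash
+# Re-apply your commits on top of the latest master instead of merging it in.
+git fetch origin
+git checkout 1234-fix-foo
+git rebase origin/master
+
+# Squash work-in-progress commits into logical units of work.
+git rebase -i origin/master
+
+# The branch history was rewritten, so a force push is required.
+git push -f origin 1234-fix-foo
+```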
+ +Please do not add yourself to the `AUTHORS` file, as it is regenerated +regularly from the Git history. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review +to indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Using your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still +accepted, so there is no need to update outstanding pull requests to the new +format right away, but please do adjust your processes for future contributions. + +#### Small patch exception + +There are several exceptions to the signing requirement. Currently these are: + +* Your patch fixes spelling or grammar errors. +* Your patch is a single line change to documentation contained in the + `docs` directory. +* Your patch fixes Markdown formatting or syntax errors in the + documentation contained in the `docs` directory. + +If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com) + +### How can I become a maintainer? + +* Step 1: Learn the component inside out +* Step 2: Make yourself useful by contributing code, bug fixes, support etc. 
+* Step 3: Volunteer on the IRC channel (#docker at Freenode)
+* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available. You don't have to be a
+maintainer to make a difference on the project!
+
+### IRC Meetings
+
+There are two monthly meetings taking place on #docker-dev IRC to accommodate all timezones.
+Anybody can ask for a topic to be discussed prior to the meeting.
+
+If you feel the conversation is going off-topic, feel free to point it out.
+
+For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
+They also contain all the notes from previous meetings.
+
+## Docker Community Guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We
+need your help to keep it that way. To help with this we've come up with some
+general guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members: no
+  regional, racial, gender, or other abuse will be tolerated. We like nice people
+  way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community
+  feel welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break the
+  law.
+
+* Stay on topic: Make sure that you are posting to the correct channel
+  and avoid off-topic discussions. Remember when you update an issue or
+  respond to an email you are potentially sending to a large number of
+  people. Please consider this before you update. Also remember that
+  nobody likes spam.
+
+### Guideline Violations — 3 Strikes Method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly, but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't
+  hold a grudge.
+
+* People who commit minor infractions will get some education rather than
+  being put through the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how
+  much you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or
+  forgiveness.
+
+* Contact james@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with
+  a fair solution if there has been a misunderstanding.
+ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..bd943dee --- /dev/null +++ b/Dockerfile @@ -0,0 +1,113 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: Apparmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +docker-version 0.6.1 +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV PATH /go/bin:$PATH +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 + +# Install man page generator +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... 
+ +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Get the "cirros" image source so we can import it instead of fetching it during tests +RUN curl -sSL -o /cirros.tar.gz https://github.com/ewindisch/docker-cirros/raw/1cded459668e8b9dbf4ef976c94c05add9bbd8e9/cirros-0.3.0-x86_64-lxc.tar.gz + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > $HOME/.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..2947eb35 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,9 @@ +Solomon Hykes (@shykes) +Victor Vieux (@vieux) +Michael Crosby (@crosbymichael) +.mailmap: Tianon Gravi (@tianon) +.travis.yml: Tianon Gravi (@tianon) +AUTHORS: Tianon Gravi (@tianon) +Dockerfile: Tianon Gravi (@tianon) +Makefile: Tianon Gravi (@tianon) +.dockerignore: Tianon Gravi (@tianon) diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..842cc18e --- /dev/null +++ b/Makefile @@ -0,0 +1,68 @@ +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate + +# to allow `make BINDDIR=. 
shell` or `make BINDDIR= test` +# (default to no bind mount if DOCKER_HOST is set) +BINDDIR := $(if $(DOCKER_HOST),,bundles) +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) +DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") + +DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +# to allow `make DOCSDIR=docs docs-shell` +DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET + +default: binary + +all: build + $(DOCKER_RUN_DOCKER) hack/make.sh + +binary: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +cross: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross + +docs: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve + +docs-shell: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash + +docs-release: docs-build + $(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh + +test: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli + +test-unit: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +test-integration: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-integration + +test-integration-cli: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli + +validate: build + $(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco + +shell: build + $(DOCKER_RUN_DOCKER) bash + +build: bundles + docker build -t "$(DOCKER_IMAGE)" . + +docs-build: + cp ./VERSION docs/VERSION + echo "$(GIT_BRANCH)" > docs/GIT_BRANCH + echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET + echo "$(GITCOMMIT)" > docs/GITCOMMIT + docker build -t "$(DOCKER_DOCS_IMAGE)" docs + +bundles: + mkdir bundles diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..d0e0639a --- /dev/null +++ b/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2014 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/README.md b/README.md new file mode 100644 index 00000000..857cd3c7 --- /dev/null +++ b/README.md @@ -0,0 +1,205 @@ +Docker: the Linux container engine +================================== + +Docker is an open source project to pack, ship and run any application +as a lightweight container + +Docker containers are both *hardware-agnostic* and *platform-agnostic*. +This means that they can run anywhere, from your laptop to the largest +EC2 compute instance and everything in between - and they don't require +that you use a particular language, framework or packaging system. 
That
+makes them great building blocks for deploying and scaling web apps,
+databases and backend services without depending on a particular stack
+or provider.
+
+Docker is an open-source implementation of the deployment engine which
+powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
+It benefits directly from the experience accumulated over several years
+of large-scale operation and support of hundreds of thousands of
+applications and databases.
+
+![Docker L](docs/theme/mkdocs/images/docker-logo-compressed.png "Docker")
+
+## Security Disclosure
+
+Security is very important to us. If you have any issue regarding security,
+please disclose the information responsibly by sending an email to
+security@docker.com and not by creating a GitHub issue.
+
+## Better than VMs
+
+A common method for distributing applications and sandboxing their
+execution is to use virtual machines, or VMs. Typical VM formats are
+VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
+these formats should allow every developer to automatically package
+their application into a "machine" for easy distribution and deployment.
+In practice, that almost never happens, for a few reasons:
+
+  * *Size*: VMs are very large, which makes them impractical to store
+    and transfer.
+  * *Performance*: running VMs consumes significant CPU and memory,
+    which makes them impractical in many scenarios, for example local
+    development of multi-tier applications, and large-scale deployment
+    of CPU- and memory-intensive applications on large numbers of
+    machines.
+  * *Portability*: competing VM environments don't play well with each
+    other. Although conversion tools do exist, they are limited and
+    add even more overhead.
+  * *Hardware-centric*: VMs were designed with machine operators in
+    mind, not software developers. As a result, they offer very
+    limited tooling for what developers need most: building, testing
+    and running their software. For example, VMs offer no facilities
+    for application versioning, monitoring, configuration, logging or
+    service discovery.
+
+By contrast, Docker relies on a different sandboxing method known as
+*containerization*. Unlike traditional virtualization, containerization
+takes place at the kernel level. Most modern operating system kernels
+now support the primitives necessary for containerization, including
+Linux with [openvz](http://openvz.org),
+[vserver](http://linux-vserver.org) and more recently
+[lxc](http://lxc.sourceforge.net), Solaris with
+[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc)
+and FreeBSD with
+[Jails](http://www.freebsd.org/doc/handbook/jails.html).
+
+Docker builds on top of these low-level primitives to offer developers a
+portable format and runtime environment that solves all four problems.
+Docker containers are small (and their transfer can be optimized with
+layers), they have basically zero memory and CPU overhead, they are
+completely portable, and they are designed from the ground up with an
+application-centric design.
+
+The best part: because Docker operates at the OS level, it can still be
+run inside a VM!
+
+## Plays well with others
+
+Docker does not require that you buy into a particular programming
+language, framework, packaging system or configuration language.
+
+Is your application a Unix process? Does it use files, TCP connections,
+environment variables, standard Unix streams and command-line arguments
+as inputs and outputs? Then Docker can run it.
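+
+As an illustrative sketch (the image tag and command below are just
+examples, not prescribed by the docs), any ordinary Unix command line
+can be wrapped in a container unchanged:
+
+```bash
+# Environment variables, arguments and stdout all behave as usual.
+docker run -e GREETING=hello ubuntu:12.04 \
+    sh -c 'echo "$GREETING from a containerized Unix process"'
+```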
+
+Can your application's build be expressed as a sequence of such
+commands? Then Docker can build it.
+
+## Escape dependency hell
+
+A common problem for developers is the difficulty of managing all
+their application's dependencies in a simple and automated way.
+
+This is usually difficult for several reasons:
+
+  * *Cross-platform dependencies*. Modern applications often depend on
+    a combination of system libraries and binaries, language-specific
+    packages, framework-specific modules, internal components
+    developed for another project, etc. These dependencies live in
+    different "worlds" and require different tools - these tools
+    typically don't work well with each other, requiring awkward
+    custom integrations.
+
+  * *Conflicting dependencies*. Different applications may depend on
+    different versions of the same dependency. Packaging tools handle
+    these situations with various degrees of ease - but they all
+    handle them in different and incompatible ways, which again forces
+    the developer to do extra work.
+
+  * *Custom dependencies*. A developer may need to prepare a custom
+    version of their application's dependency. Some packaging systems
+    can handle custom versions of a dependency, others can't - and all
+    of them handle it differently.
+
+Docker solves dependency hell by giving the developer a simple way to
+express *all* their application's dependencies in one place, and
+streamline the process of assembling them. If this makes you think of
+[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
+*replace* your favorite packaging systems. It simply orchestrates
+their use in a simple and repeatable way. How does it do that? With
+layers.
+
+Docker defines a build as running a sequence of Unix commands, one
+after the other, in the same container. Build commands modify the
+contents of the container (usually by installing new files on the
+filesystem), the next command modifies it some more, etc. Since each
+build command inherits the result of the previous commands, the
+*order* in which the commands are executed expresses *dependencies*.
+
+Here's a typical Docker build process:
+
+```Dockerfile
+FROM ubuntu:12.04
+RUN apt-get update && apt-get install -y python python-pip curl
+RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
+RUN cd helloflask-master && pip install -r requirements.txt
+```
+
+Note that Docker doesn't care *how* dependencies are built - as long
+as they can be built by running a Unix command in a container.
+
+
+Getting started
+===============
+
+Docker can be installed on your local machine as well as servers - both
+bare metal and virtualized. It is available as a binary on most modern
+Linux systems, or as a VM on Windows, Mac and other systems.
+
+We also offer an [interactive tutorial](http://www.docker.com/tryit/)
+for quickly learning the basics of using Docker.
+
+For up-to-date install instructions, see the [Docs](http://docs.docker.com).
+
+Usage examples
+==============
+
+Docker can be used to run short-lived commands, long-running daemons
+(app servers, databases etc.), interactive shell sessions, etc. A few
+illustrative examples are sketched below.
+
+You can find a [list of real-world
+examples](http://docs.docker.com/examples/) in the
+documentation.
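+
+The following sketch shows each of those modes; the daemon image name
+`my-app-server` is hypothetical, so substitute a real image:
+
+```bash
+# A short-lived command: the container exits when the command does.
+docker run ubuntu:12.04 echo "hello world"
+
+# A long-running daemon, detached into the background with -d
+# (the image name here is a placeholder).
+docker run -d my-app-server
+
+# An interactive shell session with a TTY attached.
+docker run -i -t ubuntu:12.04 bash
+```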
+ +Under the hood +-------------- + +Under the hood, Docker is built on the following components: + +* The + [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) + and + [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) + capabilities of the Linux kernel; +* The [Go](http://golang.org) programming language. + +Contributing to Docker +====================== + +[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) +[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker) + +Want to hack on Docker? Awesome! There are instructions to get you +started [here](CONTRIBUTING.md). + +They are probably not perfect, please let us know if anything feels +wrong or incomplete. + +### Legal + +*Brought to you courtesy of our legal counsel. For more context, +please see the Notice document.* + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text. + diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..31e5c843 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.3.3 diff --git a/api/MAINTAINERS b/api/MAINTAINERS new file mode 100644 index 00000000..e0f18f14 --- /dev/null +++ b/api/MAINTAINERS @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff --git a/api/README.md b/api/README.md new file mode 100644 index 00000000..453f61a1 --- /dev/null +++ b/api/README.md @@ -0,0 +1,5 @@ +This directory contains code pertaining to the Docker API: + + - Used by the docker client when communicating with the docker daemon + + - Used by third party tools wishing to interface with the docker daemon diff --git a/api/api_unit_test.go b/api/api_unit_test.go new file mode 100644 index 00000000..678331d3 --- /dev/null +++ b/api/api_unit_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "testing" +) + +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} diff --git a/api/client/cli.go b/api/client/cli.go new file mode 100644 index 00000000..6bc3fc35 --- /dev/null +++ b/api/client/cli.go @@ -0,0 +1,148 @@ +package client + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "os" + "reflect" + "strings" + "text/template" + + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" +) + +type DockerCli struct { + proto string + addr string + configFile *registry.ConfigFile + in io.ReadCloser + out io.Writer + err io.Writer + key libtrust.PrivateKey + tlsConfig *tls.Config + scheme string + // inFd holds file descriptor of the client's STDIN, if it's a valid file + inFd uintptr + // outFd holds file descriptor of the client's STDOUT, if it's a valid file + outFd uintptr + // isTerminalIn describes if client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut describes if client's STDOUT is a TTY + isTerminalOut bool +} + +var funcMap = template.FuncMap{ + "json": func(v 
interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) { + camelArgs := make([]string, len(args)) + for i, s := range args { + if len(s) == 0 { + return nil, false + } + camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) + } + methodName := "Cmd" + strings.Join(camelArgs, "") + method := reflect.ValueOf(cli).MethodByName(methodName) + if !method.IsValid() { + return nil, false + } + return method.Interface().(func(...string) error), true +} + +// Cmd executes the specified command +func (cli *DockerCli) Cmd(args ...string) error { + if len(args) > 1 { + method, exists := cli.getMethod(args[:2]...) + if exists { + return method(args[2:]...) + } + } + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Println("Error: Command not found:", args[0]) + return cli.CmdHelp(args[1:]...) + } + return method(args[1:]...) + } + return cli.CmdHelp(args...) +} + +func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { + flags := flag.NewFlagSet(name, flag.ContinueOnError) + flags.Usage = func() { + options := "" + if flags.FlagCountUndeprecated() > 0 { + options = "[OPTIONS] " + } + fmt.Fprintf(cli.err, "\nUsage: docker %s %s%s\n\n%s\n\n", name, options, signature, description) + flags.PrintDefaults() + os.Exit(2) + } + return flags +} + +func (cli *DockerCli) LoadConfigFile() (err error) { + cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) + if err != nil { + fmt.Fprintf(cli.err, "WARNING: %s\n", err) + } + return err +} + +func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli { + var ( + inFd uintptr + outFd uintptr + isTerminalIn = false + isTerminalOut = false + scheme = "http" + ) + + if tlsConfig != nil { + scheme = "https" + } + + if in != nil { + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = term.IsTerminal(inFd) + } + } + + if out != nil { + if file, ok := out.(*os.File); ok { + outFd = file.Fd() + isTerminalOut = term.IsTerminal(outFd) + } + } + + if err == nil { + err = out + } + + return &DockerCli{ + proto: proto, + addr: addr, + in: in, + out: out, + err: err, + key: key, + inFd: inFd, + outFd: outFd, + isTerminalIn: isTerminalIn, + isTerminalOut: isTerminalOut, + tlsConfig: tlsConfig, + scheme: scheme, + } +} diff --git a/api/client/commands.go b/api/client/commands.go new file mode 100644 index 00000000..2c44bb63 --- /dev/null +++ b/api/client/commands.go @@ -0,0 +1,2548 @@ +package client + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "text/tabwriter" + "text/template" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/nat" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/registry" + 
"github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +const ( + tarHeaderSize = 512 +) + +func (cli *DockerCli) CmdHelp(args ...string) error { + if len(args) > 1 { + method, exists := cli.getMethod(args[:2]...) + if exists { + method("--help") + return nil + } + } + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) + } else { + method("--help") + return nil + } + } + + flag.Usage() + + return nil +} + +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH") + tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") + noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var ( + context archive.Archive + isRemote bool + err error + ) + + _, err = exec.LookPath("git") + hasGit := err == nil + if cmd.Arg(0) == "-" { + // As a special case, 'docker build -' will build from either an empty context with the + // contents of stdin as a Dockerfile, or a tar-ed context from stdin. + buf := bufio.NewReader(cli.in) + magic, err := buf.Peek(tarHeaderSize) + if err != nil && err != io.EOF { + return fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + if !archive.IsArchive(magic) { + dockerfile, err := ioutil.ReadAll(buf) + if err != nil { + return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err) + } + context, err = archive.Generate("Dockerfile", string(dockerfile)) + } else { + context = ioutil.NopCloser(buf) + } + } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { + isRemote = true + } else { + root := cmd.Arg(0) + if utils.IsGIT(root) { + remoteURL := cmd.Arg(0) + if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + remoteURL = "https://" + remoteURL + } + + root, err = ioutil.TempDir("", "docker-build-git") + if err != nil { + return err + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + if _, err := os.Stat(root); err != nil { + return err + } + filename := path.Join(root, "Dockerfile") + if _, err = os.Stat(filename); os.IsNotExist(err) { + return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) + } + var excludes []string + ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("Error reading .dockerignore: '%s'", err) + } + for _, pattern := range strings.Split(string(ignore), "\n") { + ok, err := filepath.Match(pattern, "Dockerfile") + if err != nil { + return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err) + } + if ok { + return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern) + } 
+ excludes = append(excludes, pattern) + } + if err = utils.ValidateContextDirectory(root, excludes); err != nil { + return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err) + } + options := &archive.TarOptions{ + Compression: archive.Uncompressed, + Excludes: excludes, + } + context, err = archive.TarWithOptions(root, options) + if err != nil { + return err + } + } + var body io.Reader + // Setup an upload progress bar + // FIXME: ProgressReader shouldn't be this annoying to use + if context != nil { + sf := utils.NewStreamFormatter(false) + body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon") + } + // Send the build context + v := &url.Values{} + + //Check if the given image name can be resolved + if *tag != "" { + repository, tag := parsers.ParseRepositoryTag(*tag) + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + if len(tag) > 0 { + if err := graph.ValidateTagName(tag); err != nil { + return err + } + } + } + + v.Set("t", *tag) + + if *suppressOutput { + v.Set("q", "1") + } + if isRemote { + v.Set("remote", cmd.Arg(0)) + } + if *noCache { + v.Set("nocache", "1") + } + if *rm { + v.Set("rm", "1") + } else { + v.Set("rm", "0") + } + + if *forceRm { + v.Set("forcerm", "1") + } + + cli.LoadConfigFile() + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(cli.configFile) + if err != nil { + return err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + if context != nil { + headers.Set("Content-Type", "application/tar") + } + err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) + if jerr, ok := err.(*utils.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err +} + +// 'docker login': login / register a user to registry service. 
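+ // A hypothetical session: "docker login -u jdoe -e jdoe@example.com" prompts only for the password, then verifies the credentials against /auth and persists them with registry.SaveConfig.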
+func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") + + var username, password, email string + + cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + err := cmd.Parse(args) + if err != nil { + return nil + } + serverAddress := registry.IndexServerAddress() + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + promptDefault := func(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } + } + + readInput := func(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) + } + + cli.LoadConfigFile() + authconfig, ok := cli.configFile.Configs[serverAddress] + if !ok { + authconfig = registry.AuthConfig{} + } + + if username == "" { + promptDefault("Username", authconfig.Username) + username = readInput(cli.in, cli.out) + if username == "" { + username = authconfig.Username + } + } + // Assume that a different username means they may not want to use + // the password or email from the config file, so prompt them + if username != authconfig.Username { + if password == "" { + oldState, _ := term.SaveState(cli.inFd) + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + password = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if password == "" { + return fmt.Errorf("Error: Password Required") + } + } + + if email == "" { + promptDefault("Email", authconfig.Email) + email = readInput(cli.in, cli.out) + if email == "" { + email = authconfig.Email + } + } + } else { + // However, if they don't override the username, use the + // password or email from the cmd line if specified. IOW, allow + // them to change/override them.
And if not specified, just + // use what's in the config file + if password == "" { + password = authconfig.Password + } + if email == "" { + email = authconfig.Email + } + } + authconfig.Username = username + authconfig.Password = password + authconfig.Email = email + authconfig.ServerAddress = serverAddress + cli.configFile.Configs[serverAddress] = authconfig + + stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) + if statusCode == 401 { + delete(cli.configFile.Configs, serverAddress) + registry.SaveConfig(cli.configFile) + return err + } + if err != nil { + return err + } + var out2 engine.Env + err = out2.Decode(stream) + if err != nil { + cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) + return err + } + registry.SaveConfig(cli.configFile) + if out2.Get("Status") != "" { + fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) + } + return nil +} + +// log out from a Docker registry +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") + + if err := cmd.Parse(args); err != nil { + return nil + } + serverAddress := registry.IndexServerAddress() + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + cli.LoadConfigFile() + if _, ok := cli.configFile.Configs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + } else { + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.Configs, serverAddress) + + if err := registry.SaveConfig(cli.configFile); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + } + return nil +} + +// 'docker wait': block until a container stops +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + var encounteredError error + for _, name := range cmd.Args() { + status, err := waitForExit(cli, name) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to wait one or more containers") + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + return encounteredError +} + +// 'docker version': show version information +func (cli *DockerCli) CmdVersion(args ...string) error { + cmd := cli.Subcmd("version", "", "Show the Docker version information.") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + if dockerversion.VERSION != "" { + fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) + } + fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION) + fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) + if dockerversion.GITCOMMIT != "" { + fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) + } + fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH) + + body, _, err := readBody(cli.call("GET", "/version", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteVersion, err := out.AddEnv() + if err != nil { + log.Errorf("Error reading remote version: %s", err) + return err + } + if _, err := out.Write(body); err != nil { + log.Errorf("Error reading remote version: %s", err) + return err + } + 
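+ // remoteVersion is backed by the engine.Output set up above: writing the response body into out populates it, and Close is called before any field is read.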
out.Close() + fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) + if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" { + fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion) + } + fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) + fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) + return nil +} + +// 'docker info': display system-wide information. +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := cli.Subcmd("info", "", "Display system-wide information") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/info", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteInfo, err := out.AddEnv() + if err != nil { + return err + } + + if _, err := out.Write(body); err != nil { + log.Errorf("Error reading remote info: %s", err) + return err + } + out.Close() + + fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) + fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) + var driverStatus [][2]string + if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { + return err + } + for _, pair := range driverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) + + if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") + fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) + fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) + + if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { + fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) + } + if initPath := remoteInfo.Get("InitPath"); initPath != "" { + fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) + } + } + + if len(remoteInfo.GetList("IndexServerAddress")) != 0 { + cli.LoadConfigFile() + u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) + } + } + if !remoteInfo.GetBool("MemoryLimit") { + fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") + } + if !remoteInfo.GetBool("SwapLimit") { + fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") + } + if !remoteInfo.GetBool("IPv4Forwarding") { + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + } + return nil +} + +func (cli *DockerCli) CmdStop(args ...string) error { + cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a grace period") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. 
Default is 10 seconds.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to stop one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to restart one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == syscall.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + log.Errorf("Unsupported signal: %d. 
Discarding.", s) + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { + log.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +func (cli *DockerCli) CmdStart(args ...string) error { + var ( + cErr chan error + tty bool + + cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") + attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's STDOUT and STDERR and forward all signals to the process") + openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + ) + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + if *attach || *openStdin { + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := env.Decode(steam); err != nil { + return err + } + config := env.GetSubEnv("Config") + tty = config.GetBool("Tty") + + if !tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + + if *openStdin && config.GetBool("OpenStdin") { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + cErr = promise.Go(func() error { + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil) + }) + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) + if err != nil { + if !*attach || !*openStdin { + fmt.Fprintf(cli.err, "%s\n", err) + } + encounteredError = fmt.Errorf("Error: failed to start one or more containers") + } else { + if !*attach || !*openStdin { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + } + if encounteredError != nil { + if *openStdin || *attach { + cli.in.Close() + } + return encounteredError + } + + if *openStdin || *attach { + if tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + log.Errorf("Error monitoring TTY size: %s", err) + } + } + return <-cErr + } + return nil +} + +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := cli.Subcmd("unpause", "CONTAINER", "Unpause all processes within a container") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := cli.Subcmd("pause", "CONTAINER", "Pause all processes within a container") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to pause container 
named %s", name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image") + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var tmpl *template.Template + if *tmplStr != "" { + var err error + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) + return &utils.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + } + + indented := new(bytes.Buffer) + indented.WriteByte('[') + status := 0 + + for _, name := range cmd.Args() { + obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) + if err != nil { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + } + + if tmpl == nil { + if err = json.Indent(indented, obj, "", " "); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } else { + // Has template, will render + var value interface{} + if err := json.Unmarshal(obj, &value); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, value); err != nil { + return err + } + cli.out.Write([]byte{'\n'}) + } + indented.WriteString(",") + } + + if indented.Len() > 1 { + // Remove trailing ',' + indented.Truncate(indented.Len() - 1) + } + indented.WriteByte(']') + + if tmpl == nil { + if _, err := io.Copy(cli.out, indented); err != nil { + return err + } + } + + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() == 0 { + cmd.Usage() + return nil + } + val := url.Values{} + if cmd.NArg() > 1 { + val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) + } + + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) + if err != nil { + return err + } + var procs engine.Env + if err := procs.Decode(stream); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + return err + } + for _, proc := range processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := 
env.Decode(steam); err != nil { + return err + } + ports := nat.PortMap{} + if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort) + } + } + + return nil +} + +// 'docker rmi IMAGE' removes all images with the name IMAGE +func (cli *DockerCli) CmdRmi(args ...string) error { + var ( + cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") + force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + ) + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + } else { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + continue + } + for _, out := range outs.Data { + if out.Get("Deleted") != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) + } + } + } + } + return encounteredError +} + +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") + } + + for _, out := range outs.Data { + outID := out.Get("Id") + if !*quiet { + if *noTrunc { + fmt.Fprintf(w, "%s\t", outID) + } else { + fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) + } + + fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) + + if *noTrunc { + fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) + } else { + fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) + } + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("Size"))) + 
} else { + if *noTrunc { + fmt.Fprintln(w, outID) + } else { + fmt.Fprintln(w, utils.TruncateID(outID)) + } + } + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers") + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + val := url.Values{} + if *v { + val.Set("v", "1") + } + if *link { + val.Set("link", "1") + } + + if *force { + val.Set("force", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +// 'docker kill NAME' kills a running container +func (cli *DockerCli) CmdKill(args ...string) error { + cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal") + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to kill one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var ( + v = url.Values{} + src = cmd.Arg(0) + repository = cmd.Arg(1) + ) + + v.Set("fromSrc", src) + v.Set("repo", repository) + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use URL|- [REPOSITORY[:TAG]]\n") + v.Set("tag", cmd.Arg(2)) + } + + if repository != "" { + //Check if the given image name can be resolved + repo, _ := parsers.ParseRepositoryTag(repository) + if _, _, err := registry.ResolveRepositoryName(repo); err != nil { + return err + } + } + + var in io.Reader + + if src == "-" { + in = cli.in + } + + return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) +} + +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry") + if err := cmd.Parse(args); err != nil { + return nil + } + name := cmd.Arg(0) + + if name == "" { + cmd.Usage() + return nil + } + + cli.LoadConfigFile() + + remote, tag := parsers.ParseRepositoryTag(name) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + // If we're not using a custom registry, we know the restrictions + // applied to repository names and can warn the user in advance. + // Custom repositories can have different rules, and we must also + // allow pushing by image ID. + if len(strings.SplitN(name, "/", 2)) == 1 { + username := cli.configFile.Configs[registry.IndexServerAddress()].Username + if username == "" { + username = "<user>" + } + return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name) + } + + v := url.Values{} + v.Set("tag", tag) + push := func(authConfig registry.AuthConfig) error { + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := push(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease log in prior to push:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return push(authConfig) + } + return err + } + return nil +} + +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry") + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + var ( + v = url.Values{} + remote = cmd.Arg(0) + newRemote = remote + ) + taglessRemote, tag := parsers.ParseRepositoryTag(remote) + if tag == "" && !*allTags { + newRemote = taglessRemote + ":latest" + } + if tag != "" && *allTags { + return fmt.Errorf("tag can't be used with --all-tags/-a") + } + + v.Set("fromImage", newRemote) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(taglessRemote) + if err != nil { + return err + } + + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + + pull := func(authConfig registry.AuthConfig) error { + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + 
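+ // As with push above, the marshalled AuthConfig travels base64-encoded in the X-Registry-Auth header rather than in the request URL.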
+ return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := pull(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease login prior to pull:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return pull(authConfig) + } + return err + } + + return nil +} + +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := cli.Subcmd("images", "[NAME]", "List images") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + // FIXME: --viz and --tree are deprecated. Remove them in a future version. + flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") + flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 1 { + cmd.Usage() + return nil + } + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. + imageFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + matchName := cmd.Arg(0) + // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
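+ // --viz emits a graphviz digraph and --tree an ASCII tree; both render the parent/child relationships assembled from the ParentId links below.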
+ if *flViz || *flTree { + v := url.Values{ + "all": []string{"1"}, + } + if len(imageFilterArgs) > 0 { + filterJson, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + var ( + printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) + startImage *engine.Env + + roots = engine.NewTable("Created", outs.Len()) + byParent = make(map[string]*engine.Table) + ) + + for _, image := range outs.Data { + if image.Get("ParentId") == "" { + roots.Add(image) + } else { + if children, exists := byParent[image.Get("ParentId")]; exists { + children.Add(image) + } else { + byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) + byParent[image.Get("ParentId")].Add(image) + } + } + + if matchName != "" { + if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) { + startImage = image + } + + for _, repotag := range image.GetList("RepoTags") { + if repotag == matchName { + startImage = image + } + } + } + } + + if *flViz { + fmt.Fprintf(cli.out, "digraph docker {\n") + printNode = (*DockerCli).printVizNode + } else { + printNode = (*DockerCli).printTreeNode + } + + if startImage != nil { + root := engine.NewTable("Created", 1) + root.Add(startImage) + cli.WalkTree(*noTrunc, root, byParent, "", printNode) + } else if matchName == "" { + cli.WalkTree(*noTrunc, roots, byParent, "", printNode) + } + if *flViz { + fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") + } + } else { + v := url.Values{} + if len(imageFilterArgs) > 0 { + filterJson, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + + if cmd.NArg() == 1 { + // FIXME rename this parameter, to not be confused with the filters flag + v.Set("filter", matchName) + } + if *all { + v.Set("all", "1") + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } + + for _, out := range outs.Data { + for _, repotag := range out.GetList("RepoTags") { + + repo, tag := parsers.ParseRepositoryTag(repotag) + outID := out.Get("Id") + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + if !*quiet { + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(out.GetInt64("VirtualSize"))) + } else { + fmt.Fprintln(w, outID) + } + } + } + + if !*quiet { + w.Flush() + } + } + return nil +} + +// FIXME: --viz and --tree are deprecated. Remove them in a future version. 
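+ // WalkTree recurses depth-first: the last sibling at each level is drawn with a └─ connector, earlier siblings with ├─ and a │ continuation prefix.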
+func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { + length := images.Len() + if length > 1 { + for index, image := range images.Data { + if index+1 == length { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } else { + printNode(cli, noTrunc, image, prefix+"\u251C─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) + } + } + } + } else { + for _, image := range images.Data { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } + } +} + +// FIXME: --viz and --tree are deprecated. Remove them in a future version. +func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { + var ( + imageID string + parentID string + ) + if noTrunc { + imageID = image.Get("Id") + parentID = image.Get("ParentId") + } else { + imageID = utils.TruncateID(image.Get("Id")) + parentID = utils.TruncateID(image.Get("ParentId")) + } + if parentID == "" { + fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) + } else { + fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) + } + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", + imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) + } +} + +// FIXME: --viz and --tree are deprecated. Remove them in a future version. +func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { + var imageID string + if noTrunc { + imageID = image.Get("Id") + } else { + imageID = utils.TruncateID(image.Get("Id")) + } + + fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(image.GetInt64("VirtualSize"))) + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) + } else { + fmt.Fprint(cli.out, "\n") + } +} + +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.Args{} + v = url.Values{} + + cmd = cli.Subcmd("ps", "", "List containers") + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") + noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") + since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") + before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") + flFilter = opts.NewListOpts(nil) + ) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. 
Valid filters:\nexited=<int> - containers with exit code of <int>\nstatus=(restarting|running|paused|exited)") + + if err := cmd.Parse(args); err != nil { + return nil + } + + if *last == -1 && *nLatest { + *last = 1 + } + + if *all { + v.Set("all", "1") + } + + if *last != -1 { + v.Set("limit", strconv.Itoa(*last)) + } + + if *since != "" { + v.Set("since", *since) + } + + if *before != "" { + v.Set("before", *before) + } + + if *size { + v.Set("size", "1") + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + if len(psFilterArgs) > 0 { + filterJson, err := filters.ToParam(psFilterArgs) + if err != nil { + return err + } + + v.Set("filters", filterJson) + } + + body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") + + if *size { + fmt.Fprintln(w, "\tSIZE") + } else { + fmt.Fprint(w, "\n") + } + } + + stripNamePrefix := func(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss + } + + for _, out := range outs.Data { + outID := out.Get("Id") + + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + if *quiet { + fmt.Fprintln(w, outID) + + continue + } + + var ( + outNames = stripNamePrefix(out.GetList("Names")) + outCommand = strconv.Quote(out.Get("Command")) + ports = engine.NewTable("", 0) + ) + + if !*noTrunc { + outCommand = utils.Trunc(outCommand, 20) + + // unless no-trunc is passed, only display the default name for the container + for _, name := range outNames { + if len(strings.Split(name, "/")) == 1 { + outNames = []string{name} + + break + } + } + } + + ports.ReadListFrom([]byte(out.Get("Ports"))) + + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, + units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), + out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) + + if *size { + if out.GetInt("SizeRootFs") > 0 { + fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(out.GetInt64("SizeRw")), units.HumanSize(out.GetInt64("SizeRootFs"))) + } else { + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("SizeRw"))) + } + + continue + } + + fmt.Fprint(w, "\n") + } + + if !*quiet { + w.Flush() + } + + return nil +} + +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. 
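+ // --run expects a JSON-encoded runconfig.Config, e.g. (illustrative values only) --run '{"Cmd": ["cat", "/tmp/hello"], "Env": ["FOO=bar"]}'; it is unmarshalled below and sent as the body of the /commit request.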
+ flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + if err := cmd.Parse(args); err != nil { + return nil + } + + var ( + name = cmd.Arg(0) + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + ) + + if name == "" || len(cmd.Args()) > 2 { + cmd.Usage() + return nil + } + + //Check if the given image name can be resolved + if repository != "" { + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v := url.Values{} + v.Set("container", name) + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("comment", *flComment) + v.Set("author", *flAuthor) + + if *flPause != true { + v.Set("pause", "0") + } + + var ( + config *runconfig.Config + env engine.Env + ) + if *flConfig != "" { + config = &runconfig.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) + if err != nil { + return err + } + if err := env.Decode(stream); err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) + return nil +} + +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := cli.Subcmd("events", "", "Get real time events from the server") + since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 0 { + cmd.Usage() + return nil + } + var ( + v = url.Values{} + loc = time.FixedZone(time.Now().Zone()) + ) + var setTime = func(key, value string) { + format := timeutils.RFC3339NanoFixed + if len(value) < len(format) { + format = format[:len(value)] + } + if t, err := time.ParseInLocation(format, value, loc); err == nil { + v.Set(key, strconv.FormatInt(t.Unix(), 10)) + } else { + v.Set(key, value) + } + } + if *since != "" { + setTime("since", *since) + } + if *until != "" { + setTime("until", *until) + } + if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + for _, change := range outs.Data { + var kind string + switch change.GetInt("Kind") { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) + } + return nil +} + +func (cli *DockerCli) CmdLogs(args ...string) error { + var ( + cmd = cli.Subcmd("logs", 
"CONTAINER", "Fetch the logs of a container") + follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail = cmd.String([]string{"-tail"}, "all", "Output the specified number of lines at the end of logs (defaults to all logs)") + ) + + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + + steam, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := env.Decode(steam); err != nil { + return err + } + + v := url.Values{} + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *times { + v.Set("timestamps", "1") + } + + if *follow { + v.Set("follow", "1") + } + v.Set("tail", *tail) + + return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil) +} + +func (cli *DockerCli) CmdAttach(args ...string) error { + var ( + cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container") + noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") + proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") + ) + + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + + stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := env.Decode(stream); err != nil { + return err + } + + if !env.GetSubEnv("State").GetBool("Running") { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + var ( + config = env.GetSubEnv("Config") + tty = config.GetBool("Tty") + ) + + if tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + log.Debugf("Error monitoring TTY size: %s", err) + } + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if !*noStdin && config.GetBool("OpenStdin") { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *proxy && !tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil { + return err + } + + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + + return nil +} + +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("term", cmd.Arg(0)) + + body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) + + if err != nil { + return err + } + outs := 
engine.NewTable("star_count", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, out := range outs.Data { + if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) { + continue + } + desc := strings.Replace(out.Get("description"), "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = utils.Trunc(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) + if out.GetBool("is_official") { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if out.GetBool("is_automated") || out.GetBool("is_trusted") { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// Ports type - Used to parse multiple -p flags +type ports []int + +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 2 { + cmd.Usage() + return nil + } + + var ( + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + v = url.Values{} + ) + + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) + v.Set("tag", tag) + + if *force { + v.Set("force", "1") + } + + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + v := url.Values{} + repos, tag := parsers.ParseRepositoryTag(image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = "latest" + } + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(repos) + if err != nil { + return err + } + + // Load the auth config file, to be able to pull the image + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + return err + } + return nil +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file 
'%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (engine.Env, error) { + containerValues := url.Values{} + if name != "" { + containerValues.Set("name", name) + } + + mergedConfig := runconfig.MergeConfigs(config, hostConfig) + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + //create the container + stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) + //if image not found try to pull it + if statusCode == 404 { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + // Retry + if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + var result engine.Env + if err := result.Decode(stream); err != nil { + return nil, err + } + + for _, warning := range result.GetList("Warnings") { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + + if containerIDFile != nil { + if err = containerIDFile.Write(result.Get("Id")); err != nil { + return nil, err + } + } + + return result, nil + +} + +func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container") + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil) + if err != nil { + return err + } + if config.Image == "" { + cmd.Usage() + return nil + } + + createResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", createResult.Get("Id")) + + return nil +} + +func (cli *DockerCli) CmdRun(args ...string) error { + // FIXME: just use runconfig.Parse already + cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container") + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID") + flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). 
SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
+ flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
+ flAttach *opts.ListOpts
+
+ ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
+ ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
+ ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
+ )
+
+ config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
+ if err != nil {
+ return err
+ }
+ if config.Image == "" {
+ cmd.Usage()
+ return nil
+ }
+
+ if *flDetach {
+ if fl := cmd.Lookup("attach"); fl != nil {
+ flAttach = fl.Value.(*opts.ListOpts)
+ if flAttach.Len() != 0 {
+ return ErrConflictAttachDetach
+ }
+ }
+ if *flAutoRemove {
+ return ErrConflictDetachAutoRemove
+ }
+
+ config.AttachStdin = false
+ config.AttachStdout = false
+ config.AttachStderr = false
+ config.StdinOnce = false
+ }
+
+ // Disable sigProxy when a TTY is allocated
+ sigProxy := *flSigProxy
+ if config.Tty {
+ sigProxy = false
+ }
+
+ runResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
+ if err != nil {
+ return err
+ }
+
+ if sigProxy {
+ sigc := cli.forwardAllSignals(runResult.Get("Id"))
+ defer signal.StopCatch(sigc)
+ }
+
+ var (
+ waitDisplayId chan struct{}
+ errCh chan error
+ )
+
+ if !config.AttachStdout && !config.AttachStderr {
+ // Make this asynchronous in order to let the client write to stdin before having to read the ID
+ waitDisplayId = make(chan struct{})
+ go func() {
+ defer close(waitDisplayId)
+ fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id"))
+ }()
+ }
+
+ if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
+ return ErrConflictRestartPolicyAndAutoRemove
+ }
+
+ // We need to instantiate the chan because the select needs it. It can
+ // be closed but can't be uninitialized.
+ hijacked := make(chan io.Closer)
+
+ // Block the return until the chan gets closed
+ defer func() {
+ log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
+ if _, ok := <-hijacked; ok {
+ log.Errorf("Hijack did not finish (chan still open)")
+ }
+ }()
+
+ if config.AttachStdin || config.AttachStdout || config.AttachStderr {
+ var (
+ out, stderr io.Writer
+ in io.ReadCloser
+ v = url.Values{}
+ )
+ v.Set("stream", "1")
+
+ if config.AttachStdin {
+ v.Set("stdin", "1")
+ in = cli.in
+ }
+ if config.AttachStdout {
+ v.Set("stdout", "1")
+ out = cli.out
+ }
+ if config.AttachStderr {
+ v.Set("stderr", "1")
+ if config.Tty {
+ stderr = cli.out
+ } else {
+ stderr = cli.err
+ }
+ }
+
+ errCh = promise.Go(func() error {
+ return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
+ })
+ } else {
+ close(hijacked)
+ }
+
+ // Acknowledge the hijack before starting
+ select {
+ case closer := <-hijacked:
+ // Make sure that the hijack gets closed when returning (results
+ // in closing the hijack chan and freeing the server's goroutines).
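+ // A minimal sketch of the handshake used here, with hypothetical names:
+ //
+ //	hijacked := make(chan io.Closer)
+ //	go func() { hijacked <- conn; close(hijacked) }() // producer side
+ //	if closer := <-hijacked; closer != nil {
+ //		defer closer.Close()
+ //	}
+ //
+ // cli.hijack sends the raw connection on the chan once the stream is
+ // up, and closes the chan on exit.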
+ if closer != nil {
+ defer closer.Close()
+ }
+ case err := <-errCh:
+ if err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+ }
+
+ // start the container
+ if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil {
+ return err
+ }
+
+ if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
+ if err := cli.monitorTtySize(runResult.Get("Id"), false); err != nil {
+ log.Errorf("Error monitoring TTY size: %s", err)
+ }
+ }
+
+ if errCh != nil {
+ if err := <-errCh; err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+ }
+
+ // Detached mode: wait for the id to be displayed and return.
+ if !config.AttachStdout && !config.AttachStderr {
+ // Detached mode
+ <-waitDisplayId
+ return nil
+ }
+
+ var status int
+
+ // Attached mode
+ if *flAutoRemove {
+ // Autoremove: wait for the container to finish, retrieve
+ // the exit code and remove the container
+ if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil {
+ return err
+ }
+ if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
+ return err
+ }
+ if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil {
+ return err
+ }
+ } else {
+ if !config.Tty {
+ // In non-tty mode, we can't detach, so we know we need to wait.
+ if status, err = waitForExit(cli, runResult.Get("Id")); err != nil {
+ return err
+ }
+ } else {
+ // In TTY mode there is a race: if the process dies too slowly, the state can be updated after the getExitCode call
+ // and result in a wrong exit code.
+ // No autoremove: simply retrieve the exit code
+ if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
+ return err
+ }
+ }
+ }
+ if status != 0 {
+ return &utils.StatusError{StatusCode: status}
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdCp(args ...string) error {
+ cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
+ if err := cmd.Parse(args); err != nil {
+ return nil
+ }
+
+ if cmd.NArg() != 2 {
+ cmd.Usage()
+ return nil
+ }
+
+ var copyData engine.Env
+ info := strings.Split(cmd.Arg(0), ":")
+
+ if len(info) != 2 {
+ return fmt.Errorf("Error: Path not specified")
+ }
+
+ copyData.Set("Resource", info[1])
+ copyData.Set("HostPath", cmd.Arg(1))
+
+ stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
+ if stream != nil {
+ defer stream.Close()
+ }
+ if statusCode == 404 {
+ return fmt.Errorf("No such container: %v", info[0])
+ }
+ if err != nil {
+ return err
+ }
+
+ if statusCode == 200 {
+ if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdSave(args ...string) error {
+ cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save one or more images to a tar archive (streamed to STDOUT by default)")
+ outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
+
+ if err := cmd.Parse(args); err != nil {
+ return err
+ }
+
+ if cmd.NArg() < 1 {
+ cmd.Usage()
+ return nil
+ }
+
+ var (
+ output io.Writer = cli.out
+ err error
+ )
+ if *outfile != "" {
+ output, err = os.Create(*outfile)
+ if err != nil {
+ return err
+ }
+ }
+ if len(cmd.Args()) == 1 {
+ image := cmd.Arg(0)
+ if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
+ return err
+ }
+ } else {
+ v := url.Values{}
+ for _, arg := range cmd.Args() {
+ v.Add("names", arg)
+ }
+ if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdLoad(args ...string) error {
+ cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
+ infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
+
+ if err := cmd.Parse(args); err != nil {
+ return err
+ }
+
+ if cmd.NArg() != 0 {
+ cmd.Usage()
+ return nil
+ }
+
+ var (
+ input io.Reader = cli.in
+ err error
+ )
+ if *infile != "" {
+ input, err = os.Open(*infile)
+ if err != nil {
+ return err
+ }
+ }
+ if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdExec(args ...string) error {
+ cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container")
+
+ execConfig, err := runconfig.ParseExec(cmd, args)
+ if err != nil {
+ return err
+ }
+ if execConfig.Container == "" {
+ cmd.Usage()
+ return nil
+ }
+
+ stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, false)
+ if err != nil {
+ return err
+ }
+
+ var execResult engine.Env
+ if err := execResult.Decode(stream); err != nil {
+ return err
+ }
+
+ execID := execResult.Get("Id")
+
+ if execID == "" {
+ fmt.Fprintf(cli.out, "exec ID empty\n")
+ return nil
+ }
+
+ if execConfig.Detach {
+ if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Interactive exec requested.
+ var (
+ out, stderr io.Writer
+ in io.ReadCloser
+ hijacked = make(chan io.Closer)
+ errCh chan error
+ )
+
+ // Block the return until the chan gets closed
+ defer func() {
+ log.Debugf("End of CmdExec(), Waiting for hijack to finish.")
+ if _, ok := <-hijacked; ok {
+ log.Errorf("Hijack did not finish (chan still open)")
+ }
+ }()
+
+ if execConfig.AttachStdin {
+ in = cli.in
+ }
+ if execConfig.AttachStdout {
+ out = cli.out
+ }
+ if execConfig.AttachStderr {
+ if execConfig.Tty {
+ stderr = cli.out
+ } else {
+ stderr = cli.err
+ }
+ }
+ errCh = promise.Go(func() error {
+ return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig)
+ })
+
+ // Acknowledge the hijack before starting
+ select {
+ case closer := <-hijacked:
+ // Make sure that the hijack gets closed when returning (results
+ // in closing the hijack chan and freeing the server's goroutines).
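+ // Exec is a two-step protocol; a sketch of the wire sequence, with a
+ // hypothetical ID:
+ //
+ //	POST /containers/{name}/exec  -> {"Id": "abc123"}
+ //	POST /exec/abc123/start       -> hijacked raw stream
+ //
+ // The select below just waits for that raw stream (or an error) before
+ // continuing.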
+ if closer != nil {
+ defer closer.Close()
+ }
+ case err := <-errCh:
+ if err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+ }
+
+ if execConfig.Tty && cli.isTerminalIn {
+ if err := cli.monitorTtySize(execID, true); err != nil {
+ log.Errorf("Error monitoring TTY size: %s", err)
+ }
+ }
+
+ if err := <-errCh; err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/api/client/hijack.go b/api/client/hijack.go
new file mode 100644
index 00000000..d0b5e93e
--- /dev/null
+++ b/api/client/hijack.go
@@ -0,0 +1,230 @@
+package client
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/docker/docker/pkg/term"
+)
+
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go's standard tls.Conn doesn't provide the CloseWrite() method, so we do
+ // it on its underlying connection.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ c := *config
+ c.ServerName = hostname
+ config = &c
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where we differ from the standard crypto/tls package: we return
+ // a wrapper which holds both the TLS and raw connections.
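+ // Keeping rawConn lets callers half-close the connection later, along
+ // the lines of (hypothetical caller):
+ //
+ //	if cwc, ok := conn.(interface {
+ //		CloseWrite() error
+ //	}); ok {
+ //		cwc.CloseWrite() // send EOF, keep reading the reply
+ //	}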
+ return &tlsClientCon{conn, rawConn}, nil +} + +func (cli *DockerCli) dial() (net.Conn, error) { + if cli.tlsConfig != nil && cli.proto != "unix" { + // Notice this isn't Go standard's tls.Dial function + return tlsDial(cli.proto, cli.addr, cli.tlsConfig) + } + return net.Dial(cli.proto, cli.addr) +} + +func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error { + defer func() { + if started != nil { + close(started) + } + }() + + params, err := cli.encodeData(data) + if err != nil { + return err + } + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Header.Set("Content-Type", "plain/text") + req.Host = cli.addr + + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + clientconn.Do(req) + + rwc, br := clientconn.Hijack() + defer rwc.Close() + + if started != nil { + started <- rwc + } + + var receiveStdout chan error + + var oldState *term.State + + if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { + oldState, err = term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + defer term.RestoreTerminal(cli.inFd, oldState) + } + + if stdout != nil || stderr != nil { + receiveStdout = promise.Go(func() (err error) { + defer func() { + if in != nil { + if setRawTerminal && cli.isTerminalIn { + term.RestoreTerminal(cli.inFd, oldState) + } + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. 
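+ // Teardown order in this defer: first restore the terminal state saved
+ // by term.SetRawTerminal above, then close stdin, except on darwin
+ // where the workaround below applies.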
+ if runtime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal && stdout != nil { + _, err = io.Copy(stdout, br) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, br) + } + log.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := promise.Go(func() error { + if in != nil { + io.Copy(rwc, in) + log.Debugf("[hijack] End of stdin") + } + + if conn, ok := rwc.(interface { + CloseWrite() error + }); ok { + if err := conn.CloseWrite(); err != nil { + log.Debugf("Couldn't send EOF: %s", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + log.Debugf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminalIn { + if err := <-sendStdin; err != nil { + log.Debugf("Error sendStdin: %s", err) + return err + } + } + return nil +} diff --git a/api/client/utils.go b/api/client/utils.go new file mode 100644 index 00000000..58b730bd --- /dev/null +++ b/api/client/utils.go @@ -0,0 +1,288 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + gosignal "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +var ( + ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") +) + +func (cli *DockerCli) HTTPClient() *http.Client { + tr := &http.Transport{ + TLSClientConfig: cli.tlsConfig, + Dial: func(network, addr string) (net.Conn, error) { + // Why 32? See issue 8035 + return net.DialTimeout(cli.proto, cli.addr, 32*time.Second) + }, + } + if cli.proto == "unix" { + // XXX workaround for net/http Transport which caches connections, but is + // intended for tcp connections, not unix sockets. 
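+ // The custom Dial above ignores the network/addr that net/http asks
+ // for and always dials the configured endpoint, roughly:
+ //
+ //	Dial: func(_, _ string) (net.Conn, error) {
+ //		return net.DialTimeout(cli.proto, cli.addr, 32*time.Second)
+ //	}
+ //
+ // so cached keep-alive connections keyed on the fake HTTP host would be
+ // meaningless for unix sockets; disable them instead.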
+ tr.DisableKeepAlives = true + + // no need in compressing for local communications + tr.DisableCompression = true + } + return &http.Client{Transport: tr} +} + +func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if env, ok := data.(engine.Env); ok { + if err := env.Encode(params); err != nil { + return nil, err + } + } else { + buf, err := json.Marshal(data) + if err != nil { + return nil, err + } + if _, err := params.Write(buf); err != nil { + return nil, err + } + } + } + return params, nil +} + +func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { + params, err := cli.encodeData(data) + if err != nil { + return nil, -1, err + } + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) + if err != nil { + return nil, -1, err + } + if passAuthInfo { + cli.LoadConfigFile() + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress()) + getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil + } + if headers, err := getHeaders(authConfig); err == nil && headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + if data != nil { + req.Header.Set("Content-Type", "application/json") + } else if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + resp, err := cli.HTTPClient().Do(req) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, -1, err + } + if len(body) == 0 { + return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) + } + return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) + } + + return resp.Body, resp.StatusCode, nil +} + +func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { + return cli.streamHelper(method, path, true, in, out, nil, headers) +} + +func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error { + if (method == "POST" || method == "PUT") && in == nil { + in = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + resp, err := cli.HTTPClient().Do(req) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker 
daemon. Is 'docker -d' running on this host?") + } + return err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) + } + return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) + } + + if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { + return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.outFd, cli.isTerminalOut) + } + if stdout != nil || stderr != nil { + // When TTY is ON, use regular copy + if setRawTerminal { + _, err = io.Copy(stdout, resp.Body) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, resp.Body) + } + log.Debugf("[stream] End of stdout") + return err + } + return nil +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + if height == 0 && width == 0 { + return + } + v := url.Values{} + v.Set("h", strconv.Itoa(height)) + v.Set("w", strconv.Itoa(width)) + + path := "" + if !isExec { + path = "/containers/" + id + "/resize?" + } else { + path = "/exec/" + id + "/resize?" + } + + if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, false)); err != nil { + log.Debugf("Error resize: %s", err) + } +} + +func waitForExit(cli *DockerCli, containerId string) (int, error) { + stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) + if err != nil { + return -1, err + } + + var out engine.Env + if err := out.Decode(stream); err != nil { + return -1, err + } + return out.GetInt("StatusCode"), nil +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { + steam, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) + if err != nil { + // If we can't connect, then the daemon probably died. 
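+ // ErrConnectionRefused is deliberately swallowed: a hypothetical call
+ // site such as
+ //
+ //	running, status, err := getExitCode(cli, id)
+ //
+ // then sees (false, -1, nil) instead of a hard failure.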
+ if err != ErrConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + var result engine.Env + if err := result.Decode(steam); err != nil { + return false, -1, err + } + + state := result.GetSubEnv("State") + return state.GetBool("Running"), state.GetInt("ExitCode"), nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, syscall.SIGWINCH) + go func() { + for _ = range sigchan { + cli.resizeTty(id, isExec) + } + }() + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + log.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { + if stream != nil { + defer stream.Close() + } + if err != nil { + return nil, statusCode, err + } + body, err := ioutil.ReadAll(stream) + if err != nil { + return nil, -1, err + } + return body, statusCode, nil +} diff --git a/api/common.go b/api/common.go new file mode 100644 index 00000000..3eecaa04 --- /dev/null +++ b/api/common.go @@ -0,0 +1,49 @@ +package api + +import ( + "fmt" + "mime" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/version" +) + +const ( + APIVERSION version.Version = "1.15" + DEFAULTHTTPHOST = "127.0.0.1" + DEFAULTUNIXSOCKET = "/var/run/docker.sock" +) + +func ValidateHost(val string) (string, error) { + host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) + if err != nil { + return val, err + } + return host, nil +} + +//TODO remove, used on < 1.5 in getContainersJSON +func DisplayablePorts(ports *engine.Table) string { + result := []string{} + ports.SetKey("PublicPort") + ports.Sort() + for _, port := range ports.Data { + if port.Get("IP") == "" { + result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type"))) + } else { + result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) + } + } + return strings.Join(result, ", ") +} + +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error()) + } + return err == nil && mimetype == expectedType +} diff --git a/api/server/MAINTAINERS b/api/server/MAINTAINERS new file mode 100644 index 00000000..c92a0611 --- /dev/null +++ b/api/server/MAINTAINERS @@ -0,0 +1,2 @@ +Victor Vieux (@vieux) +Johan Euphrosine (@proppy) diff --git a/api/server/server.go b/api/server/server.go new file mode 100644 index 00000000..93b8b60a --- /dev/null +++ b/api/server/server.go @@ -0,0 +1,1532 @@ +package server + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "expvar" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/pprof" + "os" + "strconv" + "strings" + "syscall" + + "code.google.com/p/go.net/websocket" + "github.com/docker/libcontainer/user" + "github.com/gorilla/mux" + + "github.com/docker/docker/api" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/listenbuffer" + "github.com/docker/docker/pkg/log" + 
"github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/systemd" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +var ( + activationLock chan struct{} +) + +type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// Check to make sure request's Content-Type is application/json +func checkForJson(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +//If we don't do this, POST method without Content-type (even with empty body) will fail +func parseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func parseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func httpError(w http.ResponseWriter, err error) { + statusCode := http.StatusInternalServerError + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, we should + // create appropriate error types with clearly defined meaning. 
+ if strings.Contains(err.Error(), "No such") { + statusCode = http.StatusNotFound + } else if strings.Contains(err.Error(), "Bad parameter") { + statusCode = http.StatusBadRequest + } else if strings.Contains(err.Error(), "Conflict") { + statusCode = http.StatusConflict + } else if strings.Contains(err.Error(), "Impossible") { + statusCode = http.StatusNotAcceptable + } else if strings.Contains(err.Error(), "Wrong login/password") { + statusCode = http.StatusUnauthorized + } else if strings.Contains(err.Error(), "hasn't been activated") { + statusCode = http.StatusForbidden + } + + if err != nil { + log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) + http.Error(w, err.Error(), statusCode) + } +} + +func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return v.Encode(w) +} + +func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { + w.Header().Set("Content-Type", "application/json") + if flush { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } else { + job.Stdout.Add(w) + } +} + +func getBoolParam(value string) (bool, error) { + if value == "" { + return false, nil + } + ret, err := strconv.ParseBool(value) + if err != nil { + return false, fmt.Errorf("Bad parameter") + } + return ret, nil +} + +func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfig, err = ioutil.ReadAll(r.Body) + job = eng.Job("auth") + stdoutBuffer = bytes.NewBuffer(nil) + ) + if err != nil { + return err + } + job.Setenv("authConfig", string(authConfig)) + job.Stdout.Add(stdoutBuffer) + if err = job.Run(); err != nil { + return err + } + if status := engine.Tail(stdoutBuffer, 1); status != "" { + var env engine.Env + env.Set("Status", status) + return writeJSON(w, http.StatusOK, env) + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("kill", vars["name"]) + if sig := r.Form.Get("signal"); sig != "" { + job.Args = append(job.Args, sig) + } + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("pause", vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("unpause", vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getContainersExport(eng 
*engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("export", vars["name"]) + job.Stdout.Add(w) + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + err error + outs *engine.Table + job = eng.Job("images") + ) + + job.Setenv("filters", r.Form.Get("filters")) + // FIXME this parameter could just be a match filter + job.Setenv("filter", r.Form.Get("filter")) + job.Setenv("all", r.Form.Get("all")) + + if version.GreaterThanOrEqualTo("1.7") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddListTable(); err != nil { + return err + } + + if err := job.Run(); err != nil { + return err + } + + if version.LessThan("1.7") && outs != nil { // Convert to legacy format + outsLegacy := engine.NewTable("Created", 0) + for _, out := range outs.Data { + for _, repoTag := range out.GetList("RepoTags") { + repo, tag := parsers.ParseRepositoryTag(repoTag) + outLegacy := &engine.Env{} + outLegacy.Set("Repository", repo) + outLegacy.SetJson("Tag", tag) + outLegacy.Set("Id", out.Get("Id")) + outLegacy.SetInt64("Created", out.GetInt64("Created")) + outLegacy.SetInt64("Size", out.GetInt64("Size")) + outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) + outsLegacy.Add(outLegacy) + } + } + w.Header().Set("Content-Type", "application/json") + if _, err := outsLegacy.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.GreaterThan("1.6") { + w.WriteHeader(http.StatusNotFound) + return fmt.Errorf("This is now implemented in the client.") + } + eng.ServeHTTP(w, r) + return nil +} + +func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var job = eng.Job("events") + streamJSON(job, w, true) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("until", r.Form.Get("until")) + return job.Run() +} + +func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var job = eng.Job("history", vars["name"]) + streamJSON(job, w, false) + + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_changes", vars["name"]) + streamJSON(job, w, false) + + return job.Run() +} + +func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.4") { + return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") + } + 
if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) + streamJSON(job, w, false) + return job.Run() +} + +func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + err error + outs *engine.Table + job = eng.Job("containers") + ) + + job.Setenv("all", r.Form.Get("all")) + job.Setenv("size", r.Form.Get("size")) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("before", r.Form.Get("before")) + job.Setenv("limit", r.Form.Get("limit")) + job.Setenv("filters", r.Form.Get("filters")) + + if version.GreaterThanOrEqualTo("1.5") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddTable(); err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + if version.LessThan("1.5") { // Convert to legacy format + for _, out := range outs.Data { + ports := engine.NewTable("", 0) + ports.ReadListFrom([]byte(out.Get("Ports"))) + out.Set("Ports", api.DisplayablePorts(ports)) + } + w.Header().Set("Content-Type", "application/json") + if _, err = outs.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + inspectJob = eng.Job("container_inspect", vars["name"]) + logsJob = eng.Job("logs", vars["name"]) + c, err = inspectJob.Stdout.AddEnv() + ) + if err != nil { + return err + } + logsJob.Setenv("follow", r.Form.Get("follow")) + logsJob.Setenv("tail", r.Form.Get("tail")) + logsJob.Setenv("stdout", r.Form.Get("stdout")) + logsJob.Setenv("stderr", r.Form.Get("stderr")) + logsJob.Setenv("timestamps", r.Form.Get("timestamps")) + // Validate args here, because we can't return not StatusOK after job.Run() call + stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + if err = inspectJob.Run(); err != nil { + return err + } + + var outStream, errStream io.Writer + outStream = utils.NewWriteFlusher(w) + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + + logsJob.Stdout.Add(outStream) + logsJob.Stderr.Set(errStream) + if err := logsJob.Run(); err != nil { + fmt.Fprintf(outStream, "Error running logs job: %s\n", err) + } + return nil +} + +func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) + job.Setenv("force", r.Form.Get("force")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); 
err != nil { + return err + } + var ( + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + stdoutBuffer = bytes.NewBuffer(nil) + ) + + if err := checkForJson(r); err != nil { + return err + } + + if err := config.Decode(r.Body); err != nil { + log.Errorf("%s", err) + } + + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + job.Setenv("pause", "1") + } else { + job.Setenv("pause", r.FormValue("pause")) + } + + job.Setenv("repo", r.Form.Get("repo")) + job.Setenv("tag", r.Form.Get("tag")) + job.Setenv("author", r.Form.Get("author")) + job.Setenv("comment", r.Form.Get("comment")) + job.SetenvSubEnv("config", &config) + + job.Stdout.Add(stdoutBuffer) + if err := job.Run(); err != nil { + return err + } + env.Set("Id", engine.Tail(stdoutBuffer, 1)) + return writeJSON(w, http.StatusCreated, env) +} + +// Creates an image from Pull or from Import +func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + job *engine.Job + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := ®istry.AuthConfig{} + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + job = eng.Job("pull", image, tag) + job.SetenvBool("parallel", version.GreaterThan("1.3")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + } else { //import + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) + job.Stdin.Add(r.Body) + } + + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + + return nil +} + +func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + metaHeaders = map[string][]string{} + ) + + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + var job = eng.Job("search", r.Form.Get("term")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + 
streamJSON(job, w, false) + + return job.Run() +} + +func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := parseForm(r); err != nil { + return err + } + authConfig := ®istry.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return err + } + } + + job := eng.Job("push", vars["name"]) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + job.Setenv("tag", r.Form.Get("tag")) + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + if version.GreaterThan("1.0") { + w.Header().Set("Content-Type", "application/x-tar") + } + var job *engine.Job + if name, ok := vars["name"]; ok { + job = eng.Job("image_export", name) + } else { + job = eng.Job("image_export", r.Form["names"]...) 
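+ // Multiple images arrive as repeated query parameters, matching what
+ // the client's CmdSave sends, e.g. (hypothetical request):
+ //
+ //	GET /images/get?names=redis&names=nginx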
+ } + job.Stdout.Add(w) + return job.Run() +} + +func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + job := eng.Job("load") + job.Stdin.Add(r.Body) + return job.Run() +} + +func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + out engine.Env + job = eng.Job("create", r.Form.Get("name")) + outWarnings []string + stdoutBuffer = bytes.NewBuffer(nil) + warnings = bytes.NewBuffer(nil) + ) + + if err := checkForJson(r); err != nil { + return err + } + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + // Read container ID from the first line of stdout + job.Stdout.Add(stdoutBuffer) + // Read warnings from stderr + job.Stderr.Add(warnings) + if err := job.Run(); err != nil { + return err + } + // Parse warnings from stderr + scanner := bufio.NewScanner(warnings) + for scanner.Scan() { + outWarnings = append(outWarnings, scanner.Text()) + } + out.Set("Id", engine.Tail(stdoutBuffer, 1)) + out.SetList("Warnings", outWarnings) + + return writeJSON(w, http.StatusCreated, out) +} + +func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("restart", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("rm", vars["name"]) + + job.Setenv("forceRemove", r.Form.Get("force")) + + job.Setenv("removeVolume", r.Form.Get("v")) + job.Setenv("removeLink", r.Form.Get("link")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_delete", vars["name"]) + streamJSON(job, w, false) + job.Setenv("force", r.Form.Get("force")) + job.Setenv("noprune", r.Form.Get("noprune")) + + return job.Run() +} + +func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + name = vars["name"] + job = eng.Job("start", name) + ) + + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // http://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := checkForJson(r); err != nil { + return err + } + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + } + + if err := job.Run(); err != nil { + if 
err.Error() == "Container already started" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("stop", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + if err.Error() == "Container already stopped" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + env engine.Env + stdoutBuffer = bytes.NewBuffer(nil) + job = eng.Job("wait", vars["name"]) + ) + job.Stdout.Add(stdoutBuffer) + if err := job.Run(); err != nil { + return err + } + + env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) + return writeJSON(w, http.StatusOK, env) +} + +func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + job = eng.Job("container_inspect", vars["name"]) + c, err = job.Stdout.AddEnv() + ) + if err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + + job = eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + if err := job.Run(); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + + } + return nil +} + +func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return 
fmt.Errorf("Missing parameter") + } + + if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil { + return err + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + job := eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(ws) + job.Stdout.Add(ws) + job.Stderr.Set(ws) + if err := job.Run(); err != nil { + log.Errorf("Error attaching websocket: %s", err) + } + }) + h.ServeHTTP(w, r) + + return nil +} + +func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.3") { + return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + configFileEncoded = r.Header.Get("X-Registry-Config") + configFile = ®istry.ConfigFile{} + job = eng.Job("build") + ) + + // This block can be removed when API versions prior to 1.9 are deprecated. + // Both headers will be parsed and sent along to the daemon, but if a non-empty + // ConfigFile is present, any value provided as an AuthConfig directly will + // be overridden. See BuildFile::CmdFrom for details. 
+ if version.LessThan("1.9") && authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + + if configFileEncoded != "" { + configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) + if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + configFile = ®istry.ConfigFile{} + } + } + + if version.GreaterThanOrEqualTo("1.8") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else { + job.Setenv("rm", r.FormValue("rm")) + } + job.Stdin.Add(r.Body) + job.Setenv("remote", r.FormValue("remote")) + job.Setenv("t", r.FormValue("t")) + job.Setenv("q", r.FormValue("q")) + job.Setenv("nocache", r.FormValue("nocache")) + job.Setenv("forcerm", r.FormValue("forcerm")) + job.SetenvJson("authConfig", authConfig) + job.SetenvJson("configFile", configFile) + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var copyData engine.Env + + if err := checkForJson(r); err != nil { + return err + } + + if err := copyData.Decode(r.Body); err != nil { + return err + } + + if copyData.Get("Resource") == "" { + return fmt.Errorf("Path cannot be empty") + } + + origResource := copyData.Get("Resource") + + if copyData.Get("Resource")[0] == '/' { + copyData.Set("Resource", copyData.Get("Resource")[1:]) + } + + job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) + job.Stdout.Add(w) + w.Header().Set("Content-Type", "application/x-tar") + if err := job.Run(); err != nil { + log.Errorf("%s", err.Error()) + if strings.Contains(err.Error(), "No such container") { + w.WriteHeader(http.StatusNotFound) + } else if strings.Contains(err.Error(), "no such file or directory") { + return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) + } + } + return nil +} + +func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + out engine.Env + name = vars["name"] + job = eng.Job("execCreate", name) + stdoutBuffer = bytes.NewBuffer(nil) + ) + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + + job.Stdout.Add(stdoutBuffer) + // Register an instance of Exec in container. 
+ if err := job.Run(); err != nil { + fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) + return err + } + // Return the ID + out.Set("Id", engine.Tail(stdoutBuffer, 1)) + + return writeJSON(w, http.StatusCreated, out) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + name = vars["name"] + job = eng.Job("execStart", name) + errOut io.Writer = os.Stderr + ) + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + if !job.GetenvBool("Detach") { + // Setting up the streaming http interface. + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + errOut = outStream + } + // Now run the user process in container. + job.SetCloseIO(false) + if err := job.Run(); err != nil { + fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} +func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Access-Control-Allow-Origin", "*") + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") + w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") +} + +func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the request + log.Debugf("Calling %s %s", localMethod, localRoute) + + if logging { + log.Infof("%s %s", r.Method, r.RequestURI) + } + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + if len(userAgent) == 2 
&& !dockerVersion.Equal(version.Version(userAgent[1])) { + log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + version := version.Version(mux.Vars(r)["version"]) + if version == "" { + version = api.APIVERSION + } + if enableCors { + writeCorsHeaders(w, r) + } + + if version.GreaterThan(api.APIVERSION) { + http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound) + return + } + + if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { + log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) + httpError(w, err) + } + } +} + +// Replicated from expvar.go as not public. +func expvarHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func AttachProfiler(router *mux.Router) { + router.HandleFunc("/debug/vars", expvarHandler) + router.HandleFunc("/debug/pprof/", pprof.Index) + router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + router.HandleFunc("/debug/pprof/profile", pprof.Profile) + router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) + router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { + r := mux.NewRouter() + if os.Getenv("DEBUG") != "" { + AttachProfiler(r) + } + m := map[string]map[string]HttpApiFunc{ + "GET": { + "/_ping": ping, + "/events": getEvents, + "/info": getInfo, + "/version": getVersion, + "/images/json": getImagesJSON, + "/images/viz": getImagesViz, + "/images/search": getImagesSearch, + "/images/get": getImagesGet, + "/images/{name:.*}/get": getImagesGet, + "/images/{name:.*}/history": getImagesHistory, + "/images/{name:.*}/json": getImagesByName, + "/containers/ps": getContainersJSON, + "/containers/json": getContainersJSON, + "/containers/{name:.*}/export": getContainersExport, + "/containers/{name:.*}/changes": getContainersChanges, + "/containers/{name:.*}/json": getContainersByName, + "/containers/{name:.*}/top": getContainersTop, + "/containers/{name:.*}/logs": getContainersLogs, + "/containers/{name:.*}/attach/ws": wsContainersAttach, + }, + "POST": { + "/auth": postAuth, + "/commit": postCommit, + "/build": postBuild, + "/images/create": postImagesCreate, + "/images/load": postImagesLoad, + "/images/{name:.*}/push": postImagesPush, + "/images/{name:.*}/tag": postImagesTag, + "/containers/create": postContainersCreate, + "/containers/{name:.*}/kill": postContainersKill, + "/containers/{name:.*}/pause": postContainersPause, + "/containers/{name:.*}/unpause": postContainersUnpause, + "/containers/{name:.*}/restart": postContainersRestart, + "/containers/{name:.*}/start": postContainersStart, + "/containers/{name:.*}/stop": postContainersStop, + "/containers/{name:.*}/wait": postContainersWait, + "/containers/{name:.*}/resize": postContainersResize, + "/containers/{name:.*}/attach": postContainersAttach, + "/containers/{name:.*}/copy": postContainersCopy, + 
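+			// exec is a two-step flow: an exec instance is created
+			// on the container, then started (and optionally
+			// resized) through the exec ID returned by create.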
"/containers/{name:.*}/exec": postContainerExecCreate, + "/exec/{name:.*}/start": postContainerExecStart, + "/exec/{name:.*}/resize": postContainerExecResize, + }, + "DELETE": { + "/containers/{name:.*}": deleteContainers, + "/images/{name:.*}": deleteImages, + }, + "OPTIONS": { + "": optionsHandler, + }, + } + + for method, routes := range m { + for route, fct := range routes { + log.Debugf("Registering %s, %s", method, route) + // NOTE: scope issue, make sure the variables are local and won't be changed + localRoute := route + localFct := fct + localMethod := method + + // build the handler function + f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) + + // add the new route + if localRoute == "" { + r.Methods(localMethod).HandlerFunc(f) + } else { + r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) + r.Path(localRoute).Methods(localMethod).HandlerFunc(f) + } + } + } + + return r, nil +} + +// ServeRequest processes a single http request to the docker remote api. +// FIXME: refactor this to be part of Server and not require re-creating a new +// router each time. This requires first moving ListenAndServe into Server. +func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { + router, err := createRouter(eng, false, true, "") + if err != nil { + return err + } + // Insert APIVERSION into the request as a convenience + req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) + router.ServeHTTP(w, req) + return nil +} + +// ServeFD creates an http.Server and sets it up to serve given a socket activated +// argument. +func ServeFd(addr string, handle http.Handler) error { + ls, e := systemd.ListenFD(addr) + if e != nil { + return e + } + + chErrors := make(chan error, len(ls)) + + // We don't want to start serving on these sockets until the + // daemon is initialized and installed. Otherwise required handlers + // won't be ready. + <-activationLock + + // Since ListenFD will return one or more sockets we have + // to create a go func to spawn off multiple serves + for i := range ls { + listener := ls[i] + go func() { + httpSrv := http.Server{Handler: handle} + chErrors <- httpSrv.Serve(listener) + }() + } + + for i := 0; i < len(ls); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +func lookupGidByName(nameOrGid string) (int, error) { + groups, err := user.ParseGroupFilter(func(g *user.Group) bool { + return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid + }) + if err != nil { + return -1, err + } + if groups != nil && len(groups) > 0 { + return groups[0].Gid, nil + } + return -1, fmt.Errorf("Group %s not found", nameOrGid) +} + +func changeGroup(addr string, nameOrGid string) error { + gid, err := lookupGidByName(nameOrGid) + if err != nil { + return err + } + + log.Debugf("%s group found. gid: %d", nameOrGid, gid) + return os.Chown(addr, 0, gid) +} + +// ListenAndServe sets up the required http.Server and gets it listening for +// each addr passed in and does protocol specific checking. 
+func ListenAndServe(proto, addr string, job *engine.Job) error { + var l net.Listener + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return err + } + + if proto == "fd" { + return ServeFd(addr, r) + } + + if proto == "unix" { + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return err + } + } + + var oldmask int + if proto == "unix" { + oldmask = syscall.Umask(0777) + } + + if job.GetenvBool("BufferRequests") { + l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) + } else { + l, err = net.Listen(proto, addr) + } + + if proto == "unix" { + syscall.Umask(oldmask) + } + if err != nil { + return err + } + + if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { + tlsCert := job.Getenv("TlsCert") + tlsKey := job.Getenv("TlsKey") + cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) + if err != nil { + return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", + tlsCert, tlsKey, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{cert}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } + if job.GetenvBool("TlsVerify") { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(job.Getenv("TlsCa")) + if err != nil { + return fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + l = tls.NewListener(l, tlsConfig) + } + + // Basic error and sanity checking + switch proto { + case "tcp": + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { + log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + case "unix": + socketGroup := job.Getenv("SocketGroup") + if socketGroup != "" { + if err := changeGroup(addr, socketGroup); err != nil { + if socketGroup == "docker" { + // if the user hasn't explicitly specified the group ownership, don't fail on errors. + log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) + } else { + return err + } + } + + } + if err := os.Chmod(addr, 0660); err != nil { + return err + } + default: + return fmt.Errorf("Invalid protocol format.") + } + + httpSrv := http.Server{Addr: addr, Handler: r} + return httpSrv.Serve(l) +} + +// ServeApi loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. 
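+// Endpoints arrive as PROTO://ADDR strings (e.g. "unix:///var/run/docker.sock"
+// or "tcp://127.0.0.1:2375"; the exact addresses are illustrative). ServeApi
+// blocks until every per-endpoint server has returned, surfacing the first
+// error it sees.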
+func ServeApi(job *engine.Job) engine.Status {
+	if len(job.Args) == 0 {
+		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+	}
+	var (
+		protoAddrs = job.Args
+		chErrors   = make(chan error, len(protoAddrs))
+	)
+	activationLock = make(chan struct{})
+
+	for _, protoAddr := range protoAddrs {
+		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
+		if len(protoAddrParts) != 2 {
+			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+		}
+		go func() {
+			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
+			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
+		}()
+	}
+
+	for i := 0; i < len(protoAddrs); i++ {
+		err := <-chErrors
+		if err != nil {
+			return job.Error(err)
+		}
+	}
+
+	return engine.StatusOK
+}
+
+func AcceptConnections(job *engine.Job) engine.Status {
+	// Tell the init daemon we are accepting requests
+	go systemd.SdNotify("READY=1")
+
+	// close the lock so the listeners start accepting connections
+	if activationLock != nil {
+		close(activationLock)
+	}
+
+	return engine.StatusOK
+}
diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go
new file mode 100644
index 00000000..519652f3
--- /dev/null
+++ b/api/server/server_unit_test.go
@@ -0,0 +1,555 @@
+package server
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/version"
+)
+
+func TestGetBoolParam(t *testing.T) {
+	if ret, err := getBoolParam("true"); err != nil || !ret {
+		t.Fatalf("true -> true, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("True"); err != nil || !ret {
+		t.Fatalf("True -> true, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("1"); err != nil || !ret {
+		t.Fatalf("1 -> true, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam(""); err != nil || ret {
+		t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("false"); err != nil || ret {
+		t.Fatalf("false -> false, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("0"); err != nil || ret {
+		t.Fatalf("0 -> false, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("faux"); err == nil || ret {
+		t.Fatalf("faux -> false, err | got %t %s", ret, err)
+	}
+}
+
+func TestHttpError(t *testing.T) {
+	r := httptest.NewRecorder()
+	httpError(r, fmt.Errorf("No such method"))
+	if r.Code != http.StatusNotFound {
+		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
+	}
+
+	// Use a fresh recorder per call so each status code is recorded
+	// independently.
+	r = httptest.NewRecorder()
+	httpError(r, fmt.Errorf("This account hasn't been activated"))
+	if r.Code != http.StatusForbidden {
+		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
+	}
+
+	r = httptest.NewRecorder()
+	httpError(r, fmt.Errorf("Some error"))
+	if r.Code != http.StatusInternalServerError {
+		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
+	}
+}
+
+func TestGetVersion(t *testing.T) {
+	eng := engine.New()
+	var called bool
+	eng.Register("version", func(job *engine.Job) engine.Status {
+		called = true
+		v := &engine.Env{}
+		v.SetJson("Version", "42.1")
+		v.Set("ApiVersion", "1.1.1.1.1")
+		v.Set("GoVersion", "2.42")
+		v.Set("Os", "Linux")
+		v.Set("Arch", "x86_64")
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("GET", "/version", nil, eng, t)
+	if !called {
+		t.Fatalf("handler was not called")
+	}
+	v := readEnv(r.Body, t)
+	if
v.Get("Version") != "42.1" { + t.Fatalf("%#v\n", v) + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } +} + +func TestGetInfo(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("info", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.SetInt("Containers", 1) + v.SetInt("Images", 42000) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/info", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + v := readEnv(r.Body, t) + if v.GetInt("Images") != 42000 { + t.Fatalf("%#v\n", v) + } + if v.GetInt("Containers") != 1 { + t.Fatalf("%#v\n", v) + } + assertContentType(r, "application/json", t) +} + +func TestGetImagesJSON(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + v := createEnvFromGetImagesJSONStruct(sampleImage) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + var observed getImagesJSONStruct + if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(observed, sampleImage) { + t.Errorf("Expected %#v but got %#v", sampleImage, observed) + } +} + +func TestGetImagesJSONFilter(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filter") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t) + if filter != "aaaa" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONFilters(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filters") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t) + if filter != "nnnn" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONAll(t *testing.T) { + eng := engine.New() + allFilter := "-1" + eng.Register("images", func(job *engine.Job) engine.Status { + allFilter = job.Getenv("all") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?all=1", nil, eng, t) + if allFilter != "1" { + t.Errorf("%#v", allFilter) + } +} + +func TestGetImagesJSONLegacyFormat(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + outsLegacy := engine.NewTable("Created", 0) + outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage)) + if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + images := engine.NewTable("Created", 0) + if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { + t.Fatal(err) + } + if images.Len() != 1 { + t.Fatalf("Expected 1 image, %d found", images.Len()) + } + image := images.Data[0] + if image.Get("Tag") != "test-tag" { + t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag")) + } + if image.Get("Repository") != "test-name" { + 
t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository")) + } +} + +func TestGetContainersByName(t *testing.T) { + eng := engine.New() + name := "container_name" + var called bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Errorf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Errorf("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Errorf("dirty env variable set when it shouldn't") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertContentType(r, "application/json", t) + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +} + +func TestGetEvents(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("events", func(job *engine.Job) engine.Status { + called = true + since := job.Getenv("since") + if since != "1" { + t.Fatalf("'since' should be 1, found %#v instead", since) + } + until := job.Getenv("until") + if until != "0" { + t.Fatalf("'until' should be 0, found %#v instead", until) + } + v := &engine.Env{} + v.Set("since", since) + v.Set("until", until) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertContentType(r, "application/json", t) + var stdout_json struct { + Since int + Until int + } + if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil { + t.Fatal(err) + } + if stdout_json.Since != 1 { + t.Errorf("since != 1: %#v", stdout_json.Since) + } + if stdout_json.Until != 0 { + t.Errorf("until != 0: %#v", stdout_json.Until) + } +} + +func TestLogs(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + expected := "logs" + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + follow := job.Getenv("follow") + if follow != "1" { + t.Fatalf("follow: %s, must be 1", follow) + } + stdout := job.Getenv("stdout") + if stdout != "1" { + t.Fatalf("stdout %s, must be 1", stdout) + } + stderr := job.Getenv("stderr") + if stderr != "" { + t.Fatalf("stderr %s, must be empty", stderr) + } + timestamps := job.Getenv("timestamps") + if timestamps != "1" { + t.Fatalf("timestamps %s, must be 1", timestamps) + } + job.Stdout.Write([]byte(expected)) + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1×tamps=1", nil, eng, t) + if r.Code != http.StatusOK { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) + } + if !inspect { 
+ t.Fatal("container_inspect job was not called") + } + if !logs { + t.Fatal("logs job was not called") + } + res := r.Body.String() + if res != expected { + t.Fatalf("Output %s, expected %s", res, expected) + } +} + +func TestLogsNoStreams(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/test/logs", nil, eng, t) + if r.Code != http.StatusBadRequest { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest) + } + if inspect { + t.Fatal("container_inspect job was called, but it shouldn't") + } + if logs { + t.Fatal("logs job was called, but it shouldn't") + } + res := strings.TrimSpace(r.Body.String()) + expected := "Bad parameters: you must choose at least one stream" + if !strings.Contains(res, expected) { + t.Fatalf("Output %s, expected %s in it", res, expected) + } +} + +func TestGetImagesHistory(t *testing.T) { + eng := engine.New() + imageName := "docker-test-image" + var called bool + eng.Register("history", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != imageName { + t.Fatalf("name != '%s': %#v", imageName, job.Args[0]) + } + v := &engine.Env{} + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusOK { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } +} + +func TestGetImagesByName(t *testing.T) { + eng := engine.New() + name := "image_name" + var called bool + eng.Register("image_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Fatal("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Fatal("dirty env variable set when it shouldn't") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +} + +func TestDeleteContainers(t *testing.T) { + eng := engine.New() + name := "foo" + var called bool + eng.Register("rm", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + t.Fatalf("Job arguments is empty") + } + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + return engine.StatusOK + }) + r := 
serveRequest("DELETE", "/containers/"+name, nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusNoContent { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent) + } +} + +func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) +} + +func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + r := httptest.NewRecorder() + req, err := http.NewRequest(method, target, body) + if err != nil { + t.Fatal(err) + } + if err := ServeRequest(eng, version, r, req); err != nil { + t.Fatal(err) + } + return r +} + +func readEnv(src io.Reader, t *testing.T) *engine.Env { + out := engine.NewOutput() + v, err := out.AddEnv() + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(out, src); err != nil { + t.Fatal(err) + } + out.Close() + return v +} + +func toJson(data interface{}, t *testing.T) io.Reader { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(data); err != nil { + t.Fatal(err) + } + return &buf +} + +func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) { + if recorder.HeaderMap.Get("Content-Type") != content_type { + t.Fatalf("%#v\n", recorder) + } +} + +// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that +// should die as soon as we converted all integration tests? +// assertHttpNotError expect the given response to not have an error. +// Otherwise the it causes the test to fail. +func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { + // Non-error http status are [200, 400) + if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { + t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) + } +} + +func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env { + v := &engine.Env{} + v.SetList("RepoTags", data.RepoTags) + v.Set("Id", data.Id) + v.SetInt64("Created", data.Created) + v.SetInt64("Size", data.Size) + v.SetInt64("VirtualSize", data.VirtualSize) + return v +} + +type getImagesJSONStruct struct { + RepoTags []string + Id string + Created int64 + Size int64 + VirtualSize int64 +} + +var sampleImage getImagesJSONStruct = getImagesJSONStruct{ + RepoTags: []string{"test-name:test-tag"}, + Id: "ID", + Created: 999, + Size: 777, + VirtualSize: 666, +} diff --git a/builder/MAINTAINERS b/builder/MAINTAINERS new file mode 100644 index 00000000..4d158aa2 --- /dev/null +++ b/builder/MAINTAINERS @@ -0,0 +1,2 @@ +Tibor Vass (@tiborvass) +Erik Hollensbe (@erikh) diff --git a/builder/dispatchers.go b/builder/dispatchers.go new file mode 100644 index 00000000..2184e48a --- /dev/null +++ b/builder/dispatchers.go @@ -0,0 +1,353 @@ +package builder + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/log" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/runconfig" +) + +// dispatch with no layer / parsing. This is effectively not a command. 
+func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error {
+	return nil
+}
+
+// ENV foo bar
+//
+// Sets the environment variable foo to bar, also makes interpolation
+// in the dockerfile available from the next statement on via ${foo}.
+//
+func env(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) != 2 {
+		return fmt.Errorf("ENV accepts two arguments")
+	}
+
+	fullEnv := fmt.Sprintf("%s=%s", args[0], args[1])
+
+	for i, envVar := range b.Config.Env {
+		envParts := strings.SplitN(envVar, "=", 2)
+		if args[0] == envParts[0] {
+			b.Config.Env[i] = fullEnv
+			return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
+		}
+	}
+	b.Config.Env = append(b.Config.Env, fullEnv)
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
+}
+
+// MAINTAINER some text
+//
+// Sets the maintainer metadata.
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("MAINTAINER requires exactly one argument")
+	}
+
+	b.maintainer = args[0]
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
+}
+
+// ADD foo /path
+//
+// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
+// exist here. If you do not wish to have this automatic handling, use COPY.
+//
+func add(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) < 2 {
+		return fmt.Errorf("ADD requires at least two arguments")
+	}
+
+	return b.runContextCommand(args, true, true, "ADD")
+}
+
+// COPY foo /path
+//
+// Same as 'ADD' but without the tar and remote url handling.
+//
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) < 2 {
+		return fmt.Errorf("COPY requires at least two arguments")
+	}
+
+	return b.runContextCommand(args, false, false, "COPY")
+}
+
+// FROM imagename
+//
+// This sets the image the dockerfile will build on top of.
+//
+func from(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("FROM requires one argument")
+	}
+
+	name := args[0]
+
+	image, err := b.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		if b.Daemon.Graph().IsNotExist(err) {
+			image, err = b.pullImage(name)
+		}
+
+		// note that the top level err will still be !nil here if IsNotExist is
+		// not the error. This approach just simplifies the logic a bit.
+		if err != nil {
+			return err
+		}
+	}
+
+	return b.processImageFrom(image)
+}
+
+// ONBUILD RUN echo yo
+//
+// ONBUILD triggers run when the image is used in a FROM statement.
+//
+// ONBUILD handling has a lot of special-case functionality, the heading in
+// evaluator.go and comments around dispatch() in the same file explain the
+// special cases. Search for 'OnBuild' in internals.go for additional special
+// cases.
+// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.Config.OnBuild = append(b.Config.OnBuild, original) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("WORKDIR requires exactly one argument") + } + + workdir := args[0] + + if workdir[0] == '/' { + b.Config.WorkingDir = workdir + } else { + if b.Config.WorkingDir == "" { + b.Config.WorkingDir = "/" + } + b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir) + } + + return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' in the event there is only one argument. The difference in +// processing: +// +// RUN echo hi # sh -c echo hi +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + args = handleJsonArgs(args, attributes) + + if len(args) == 1 { + args = append([]string{"/bin/sh", "-c"}, args[0]) + } + + runCmd := flag.NewFlagSet("run", flag.ContinueOnError) + runCmd.SetOutput(ioutil.Discard) + runCmd.Usage = nil + + config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...), nil) + if err != nil { + return err + } + + cmd := b.Config.Cmd + // set Cmd manually, this is special case only for Dockerfiles + b.Config.Cmd = config.Cmd + runconfig.Merge(b.Config, config) + + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + + log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + c, err := b.create() + if err != nil { + return err + } + + // Ensure that we keep the container mounted until the commit + // to avoid unmounting and then mounting directly again + c.Mount() + defer c.Unmount() + + err = b.run(c) + if err != nil { + return err + } + if err := b.commit(c.ID, cmd, "run"); err != nil { + return err + } + + return nil +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + b.Config.Cmd = handleJsonArgs(args, attributes) + + if !attributes["json"] { + b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...) + } + + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", b.Config.Cmd)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will +// accept the CMD as the arguments to /usr/sbin/nginx. 
+//
+// Handles command processing similar to CMD and RUN, except that
+// b.Config.Entrypoint is initialized at NewBuilder time instead of through
+// argument parsing.
+//
+func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
+	parsed := handleJsonArgs(args, attributes)
+
+	switch {
+	case attributes["json"]:
+		// ENTRYPOINT ["echo", "hi"]
+		b.Config.Entrypoint = parsed
+	case len(parsed) == 0:
+		// ENTRYPOINT []
+		b.Config.Entrypoint = nil
+	default:
+		// ENTRYPOINT echo hi
+		b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]}
+	}
+
+	// When setting the entrypoint, reset the command to nil if a CMD was
+	// not explicitly set.
+	if !b.cmdSet {
+		b.Config.Cmd = nil
+	}
+
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// EXPOSE 6667/tcp 7000/tcp
+//
+// Expose ports for links and port mappings. This all ends up in
+// b.Config.ExposedPorts for runconfig.
+//
+func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
+	portsTab := args
+
+	if b.Config.ExposedPorts == nil {
+		b.Config.ExposedPorts = make(nat.PortSet)
+	}
+
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
+	if err != nil {
+		return err
+	}
+
+	for port := range ports {
+		if _, exists := b.Config.ExposedPorts[port]; !exists {
+			b.Config.ExposedPorts[port] = struct{}{}
+		}
+	}
+	b.Config.PortSpecs = nil
+
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
+}
+
+// USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+//
+func user(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("USER requires exactly one argument")
+	}
+
+	b.Config.User = args[0]
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
+}
+
+// VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON array form.
+//
+func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) == 0 {
+		return fmt.Errorf("Volume cannot be empty")
+	}
+
+	if b.Config.Volumes == nil {
+		b.Config.Volumes = map[string]struct{}{}
+	}
+	for _, v := range args {
+		b.Config.Volumes[v] = struct{}{}
+	}
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// INSERT is no longer accepted, but we still parse it.
+func insert(b *Builder, args []string, attributes map[string]bool, original string) error {
+	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
+}
diff --git a/builder/evaluator.go b/builder/evaluator.go
new file mode 100644
index 00000000..7884d36a
--- /dev/null
+++ b/builder/evaluator.go
@@ -0,0 +1,236 @@
+// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
+//
+// It incorporates a dispatch table based on the parser.Node values (see the
+// parser package for more information) that are yielded from the parser itself.
+// The BuildOpts struct passed to NewBuilder can be used to customize the
+// experience for execution purposes only. Parsing is controlled in the parser
+// package, and this division of responsibility should be respected.
+//
+// Please see the jump table targets for the actual invocations, most of which
+// will call out to the functions in internals.go to deal with their tasks.
+//
+// ONBUILD is a special case, which is covered in the onbuild() func in
+// dispatchers.go.
+//
+// The evaluator uses the concept of "steps", each of which usually corresponds
+// to one processable line in the Dockerfile. Each step is numbered, and certain
+// actions are taken before and after each step, such as creating an image ID
+// and removing temporary containers and images. Note that ONBUILD creates a
+// kinda-sorta "sub run" which includes its own set of steps (usually only one
+// of them).
+package builder
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/docker/docker/builder/parser"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+var (
+	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
+)
+
+// Environment variable interpolation will happen on these statements only.
+var replaceEnvAllowed = map[string]struct{}{
+	"env":     {},
+	"add":     {},
+	"copy":    {},
+	"workdir": {},
+	"expose":  {},
+	"volume":  {},
+	"user":    {},
+}
+
+var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error
+
+func init() {
+	evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
+		"env":        env,
+		"maintainer": maintainer,
+		"add":        add,
+		"copy":       dispatchCopy, // copy() is a go builtin
+		"from":       from,
+		"onbuild":    onbuild,
+		"workdir":    workdir,
+		"run":        run,
+		"cmd":        cmd,
+		"entrypoint": entrypoint,
+		"expose":     expose,
+		"volume":     volume,
+		"user":       user,
+		"insert":     insert,
+	}
+}
+
+// internal struct, used to maintain configuration of the Dockerfile's
+// processing as it evaluates the parsing result.
+type Builder struct {
+	Daemon *daemon.Daemon
+	Engine *engine.Engine
+
+	// effectively stdio for the run. Because it is not stdio, I said
+	// "Effectively". Do not use stdio anywhere in this package for any reason.
+	OutStream io.Writer
+	ErrStream io.Writer
+
+	Verbose      bool
+	UtilizeCache bool
+
+	// controls how images and containers are handled between steps.
+	Remove      bool
+	ForceRemove bool
+
+	AuthConfig     *registry.AuthConfig
+	AuthConfigFile *registry.ConfigFile
+
+	// Deprecated, original writer used for ImagePull. To be removed.
+	OutOld          io.Writer
+	StreamFormatter *utils.StreamFormatter
+
+	Config *runconfig.Config // runconfig for cmd, run, entrypoint etc.
+
+	// both of these are controlled by the Remove and ForceRemove options in BuildOpts
+	TmpContainers map[string]struct{} // a map of containers used for removes
+
+	dockerfile  *parser.Node  // the syntax tree of the dockerfile
+	image       string        // image name for commit processing
+	maintainer  string        // maintainer name. Could probably be removed.
+	cmdSet      bool          // indicates if CMD was set in the current Dockerfile
+	context     tarsum.TarSum // the context is a tarball that is uploaded by the client
+	contextPath string        // the path of the temporary directory the local context is unpacked to (server side)
+}
+
+// Run the builder with the context. This is the lynchpin of this package. This
+// will (barring errors):
+//
+// * call readContext() which will set up the temporary directory and unpack
+// the context into it.
+// * read the dockerfile
+// * parse the dockerfile
+// * walk the parse tree and execute it by dispatching to handlers. If Remove
+// or ForceRemove is set, additional cleanup around containers happens after
+// processing.
+// * Print a happy message and return the image ID.
+//
+func (b *Builder) Run(context io.Reader) (string, error) {
+	if err := b.readContext(context); err != nil {
+		return "", err
+	}
+
+	defer func() {
+		if err := os.RemoveAll(b.contextPath); err != nil {
+			log.Debugf("[BUILDER] failed to remove temporary context: %s", err)
+		}
+	}()
+
+	filename := path.Join(b.contextPath, "Dockerfile")
+
+	fi, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
+	}
+	// Guard against other Stat errors; without this, fi would be nil below.
+	if err != nil {
+		return "", err
+	}
+	if fi.Size() == 0 {
+		return "", ErrDockerfileEmpty
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", err
+	}
+
+	defer f.Close()
+
+	ast, err := parser.Parse(f)
+	if err != nil {
+		return "", err
+	}
+
+	b.dockerfile = ast
+
+	// some initializations that would not have been supplied by the caller.
+	b.Config = &runconfig.Config{}
+	b.TmpContainers = map[string]struct{}{}
+
+	for i, n := range b.dockerfile.Children {
+		if err := b.dispatch(i, n); err != nil {
+			if b.ForceRemove {
+				b.clearTmp()
+			}
+			return "", err
+		}
+		fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image))
+		if b.Remove {
+			b.clearTmp()
+		}
+	}
+
+	if b.image == "" {
+		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
+	}
+
+	fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
+	return b.image, nil
+}
+
+// This method is the entrypoint to all statement handling routines.
+//
+// Almost all nodes will have this structure:
+// Child[Node, Node, Node] where Child is from parser.Node.Children and each
+// node comes from parser.Node.Next. This forms a "line" with a statement and
+// arguments and we process them in this normalized form by hitting
+// evaluateTable with the leaf nodes of the command and the Builder object.
+//
+// ONBUILD is a special case; in this case the parser will emit:
+// Child[Node, Child[Node, Node...]] where the first node is the literal
+// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
+// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
+// deal with that, at least until it becomes more of a general concern with new
+// features.
+func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
+	cmd := ast.Value
+	attrs := ast.Attributes
+	original := ast.Original
+	strs := []string{}
+	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
+
+	if cmd == "onbuild" {
+		ast = ast.Next.Children[0]
+		strs = append(strs, ast.Value)
+		msg += " " + ast.Value
+	}
+
+	for ast.Next != nil {
+		ast = ast.Next
+		str := ast.Value
+		if _, ok := replaceEnvAllowed[cmd]; ok {
+			str = b.replaceEnv(ast.Value)
+		}
+		strs = append(strs, str)
+		msg += " " + ast.Value
+	}
+
+	fmt.Fprintln(b.OutStream, msg)
+
+	// XXX yes, we skip any cmds that are not valid; the parser should have
+	// picked these out already.
+	if f, ok := evaluateTable[cmd]; ok {
+		return f(b, strs, attrs, original)
+	}
+
+	fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd))
+
+	return nil
+}
diff --git a/builder/internals.go b/builder/internals.go
new file mode 100644
index 00000000..fa8b9f70
--- /dev/null
+++ b/builder/internals.go
@@ -0,0 +1,689 @@
+package builder
+
+// internals for handling commands. Covers many areas and a lot of
+// non-contiguous functionality. Please read the comments.
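+//
+// Rough map of this file: readContext unpacks the build context and wires it
+// through tarsum; commit and probeCache implement layer commits and
+// build-cache lookups; runContextCommand and calcCopyInfo implement ADD and
+// COPY (including the hashing rules that feed the cache); create and run
+// drive the containers behind RUN.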
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/builder/parser"
+	"github.com/docker/docker/daemon"
+	imagepkg "github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+func (b *Builder) readContext(context io.Reader) error {
+	tmpdirPath, err := ioutil.TempDir("", "docker-build")
+	if err != nil {
+		return err
+	}
+
+	decompressedStream, err := archive.DecompressStream(context)
+	if err != nil {
+		return err
+	}
+
+	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
+		return err
+	}
+
+	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
+		return err
+	}
+
+	b.contextPath = tmpdirPath
+	return nil
+}
+
+func (b *Builder) commit(id string, autoCmd []string, comment string) error {
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to commit")
+	}
+	b.Config.Image = b.image
+	if id == "" {
+		cmd := b.Config.Cmd
+		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
+		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+
+		hit, err := b.probeCache()
+		if err != nil {
+			return err
+		}
+		if hit {
+			return nil
+		}
+
+		container, err := b.create()
+		if err != nil {
+			return err
+		}
+		id = container.ID
+
+		if err := container.Mount(); err != nil {
+			return err
+		}
+		defer container.Unmount()
+	}
+	container := b.Daemon.Get(id)
+	if container == nil {
+		return fmt.Errorf("An error occurred while creating the container")
+	}
+
+	// Note: Actually copy the struct
+	autoConfig := *b.Config
+	autoConfig.Cmd = autoCmd
+
+	// Commit the container
+	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
+	if err != nil {
+		return err
+	}
+	b.image = image.ID
+	return nil
+}
+
+type copyInfo struct {
+	origPath   string
+	destPath   string
+	hash       string
+	decompress bool
+	tmpDir     string
+}
+
+func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
+	if b.context == nil {
+		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
+	}
+
+	if len(args) < 2 {
+		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
+	}
+
+	dest := args[len(args)-1] // last one is always the dest
+
+	copyInfos := []*copyInfo{}
+
+	b.Config.Image = b.image
+
+	defer func() {
+		for _, ci := range copyInfos {
+			if ci.tmpDir != "" {
+				os.RemoveAll(ci.tmpDir)
+			}
+		}
+	}()
+
+	// Loop through each src file and calculate the info we need to
+	// do the copy (e.g. hash value if cached).
Don't actually do + // the copy until we've looked at all src files + for _, orig := range args[0 : len(args)-1] { + err := calcCopyInfo(b, cmdName, ©Infos, orig, dest, allowRemote, allowDecompression) + if err != nil { + return err + } + } + + if len(copyInfos) == 0 { + return fmt.Errorf("No source files were specified") + } + + if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one CI then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(copyInfos) == 1 { + srcHash = copyInfos[0].hash + origPaths = copyInfos[0].origPath + } else { + var hashs []string + var origs []string + for _, ci := range copyInfos { + hashs = append(hashs, ci.hash) + origs = append(origs, ci.origPath) + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.Config.Cmd + b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)} + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + // If we do not have at least one hash, never use the cache + if hit && b.UtilizeCache { + return nil + } + + container, _, err := b.Daemon.Create(b.Config, nil, "") + if err != nil { + return err + } + b.TmpContainers[container.ID] = struct{}{} + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + + for _, ci := range copyInfos { + if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil { + return err + } + } + + if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil { + return err + } + return nil +} + +func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error { + + if origPath != "" && origPath[0] == '/' && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "./") + + // In the remote/URL case, download it and gen its hashcode + if utils.IsURL(origPath) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath // default to this but can change + ci.destPath = destPath + ci.decompress = false + *cInfos = append(*cInfos, &ci) + + // Initiate the download + resp, err := utils.Download(ci.origPath) + if err != nil { + return err + } + + // Create a tmp dir + tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") + if err != nil { + return err + } + ci.tmpDir = tmpDirName + + // Create a tmp file within our tmp dir + tmpFileName := path.Join(tmpDirName, "tmp") + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + + // Download and dump result to tmp file + if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil { + tmpFile.Close() + return err + } + fmt.Fprintf(b.OutStream, "\n") + tmpFile.Close() + + // Remove the mtime of the newly created tmp file + if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != 
nil { + return err + } + + ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + + // If the destination is a directory, figure out the filename. + if strings.HasSuffix(ci.destPath, "/") { + u, err := url.Parse(origPath) + if err != nil { + return err + } + path := u.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + parts := strings.Split(path, "/") + filename := parts[len(parts)-1] + if filename == "" { + return fmt.Errorf("cannot determine filename from url: %s", u) + } + ci.destPath = ci.destPath + filename + } + + // Calc the checksum, only if we're using the cache + if b.UtilizeCache { + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return err + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0) + if err != nil { + return err + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } + ci.hash = tarSum.Sum(nil) + r.Close() + } + + return nil + } + + // Deal with wildcards + if ContainsWildcards(origPath) { + for _, fileInfo := range b.context.GetSums() { + if fileInfo.Name() == "" { + continue + } + match, _ := path.Match(origPath, fileInfo.Name()) + if !match { + continue + } + + calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression) + } + return nil + } + + // Must be a dir or a file + + if err := b.checkPathForAddition(origPath); err != nil { + return err + } + fi, _ := os.Stat(path.Join(b.contextPath, origPath)) + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath + ci.destPath = destPath + ci.decompress = allowDecompression + *cInfos = append(*cInfos, &ci) + + // If not using cache don't need to do anything else. + // If we are using a cache then calc the hash for the src file/dir + if !b.UtilizeCache { + return nil + } + + // Deal with the single file case + if !fi.IsDir() { + // This will match first file in sums of the archive + fis := b.context.GetSums().GetFile(ci.origPath) + if fis != nil { + ci.hash = "file:" + fis.Sum() + } + return nil + } + + // Must be a dir + var subfiles []string + absOrigPath := path.Join(b.contextPath, ci.origPath) + + // Add a trailing / to make sure we only pick up nested files under + // the dir and not sibling files of the dir that just happen to + // start with the same chars + if !strings.HasSuffix(absOrigPath, "/") { + absOrigPath += "/" + } + + // Need path w/o / too to find matching dir w/o trailing / + absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] + + for _, fileInfo := range b.context.GetSums() { + absFile := path.Join(b.contextPath, fileInfo.Name()) + if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash { + subfiles = append(subfiles, fileInfo.Sum()) + } + } + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) + + return nil +} + +func ContainsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' {
+			return true
+		}
+	}
+	return false
+}
+
+func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
+	remote, tag := parsers.ParseRepositoryTag(name)
+	if tag == "" {
+		tag = "latest"
+	}
+	pullRegistryAuth := b.AuthConfig
+	if len(b.AuthConfigFile.Configs) > 0 {
+		// The request came with a full auth config file; prefer a
+		// resolved entry from it over the single default AuthConfig.
+		endpoint, _, err := registry.ResolveRepositoryName(remote)
+		if err != nil {
+			return nil, err
+		}
+		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
+		pullRegistryAuth = &resolvedAuth
+	}
+	job := b.Engine.Job("pull", remote, tag)
+	job.SetenvBool("json", b.StreamFormatter.Json())
+	job.SetenvBool("parallel", true)
+	job.SetenvJson("authConfig", pullRegistryAuth)
+	job.Stdout.Add(b.OutOld)
+	if err := job.Run(); err != nil {
+		return nil, err
+	}
+	image, err := b.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return image, nil
+}
+
+func (b *Builder) processImageFrom(img *imagepkg.Image) error {
+	b.image = img.ID
+
+	if img.Config != nil {
+		b.Config = img.Config
+	}
+
+	if len(b.Config.Env) == 0 {
+		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
+	}
+
+	// Process ONBUILD triggers if they exist
+	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
+		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
+	}
+
+	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
+	onBuildTriggers := b.Config.OnBuild
+	b.Config.OnBuild = []string{}
+
+	// parse the ONBUILD triggers by invoking the parser
+	for stepN, step := range onBuildTriggers {
+		ast, err := parser.Parse(strings.NewReader(step))
+		if err != nil {
+			return err
+		}
+
+		for i, n := range ast.Children {
+			switch strings.ToUpper(n.Value) {
+			case "ONBUILD":
+				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+			case "MAINTAINER", "FROM":
+				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
+			}
+
+			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)
+
+			if err := b.dispatch(i, n); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
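+// The build cache key is effectively the (current image, full runconfig)
+// pair, which is why dispatchers overwrite b.Config.Cmd with a "#(nop)"
+// marker before probing; commit() above does exactly this. A minimal sketch
+// of the pattern (illustrative only, not called anywhere):
+func exampleCacheProbe(b *Builder, comment string) (bool, error) {
+	saved := b.Config.Cmd
+	b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
+	defer func() { b.Config.Cmd = saved }()
+	return b.probeCache()
+}
+
+// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
+// and if so attempts to look up the current `b.image` and `b.Config` pair
+// in the current server `b.Daemon`. If an image is found, probeCache returns
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
+// is any error, it returns `(false, err)`.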
+func (b *Builder) probeCache() (bool, error) { + if b.UtilizeCache { + if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil { + return false, err + } else if cache != nil { + fmt.Fprintf(b.OutStream, " ---> Using cache\n") + log.Debugf("[BUILDER] Use cached version") + b.image = cache.ID + return true, nil + } else { + log.Debugf("[BUILDER] Cache miss") + } + } + return false, nil +} + +func (b *Builder) create() (*daemon.Container, error) { + if b.image == "" { + return nil, fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.Config.Image = b.image + + config := *b.Config + + // Create the container + c, warnings, err := b.Daemon.Create(b.Config, nil, "") + if err != nil { + return nil, err + } + for _, warning := range warnings { + fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) + } + + b.TmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + c.Path = config.Cmd[0] + c.Args = config.Cmd[1:] + + return c, nil +} + +func (b *Builder) run(c *daemon.Container) error { + var errCh chan error + if b.Verbose { + errCh = promise.Go(func() error { + // FIXME: call the 'attach' job so that daemon.Attach can be made private + // + // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach + // but without hijacking for stdin. Also, with attach there can be race + // condition because of some output already was printed before it. + return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream) + }) + } + + //start the container + if err := c.Start(); err != nil { + return err + } + + if errCh != nil { + if err := <-errCh; err != nil { + return err + } + } + + // Wait for it to finish + if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 { + err := &utils.JSONError{ + Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret), + Code: ret, + } + return err + } + + return nil +} + +func (b *Builder) checkPathForAddition(orig string) error { + origPath := path.Join(b.contextPath, orig) + origPath, err := filepath.EvalSymlinks(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + if !strings.HasPrefix(origPath, b.contextPath) { + return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) + } + if _, err := os.Stat(origPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + return nil +} + +func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error { + var ( + err error + destExists = true + origPath = path.Join(b.contextPath, orig) + destPath = path.Join(container.RootfsPath(), dest) + ) + + if destPath != container.RootfsPath() { + destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) + if err != nil { + return err + } + } + + // Preserve the trailing '/' + if strings.HasSuffix(dest, "/") || dest == "." 
{ + destPath = destPath + "/" + } + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + destExists = false + } + + fi, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + + if fi.IsDir() { + return copyAsDirectory(origPath, destPath, destExists) + } + + // If we are adding a remote file (or we've been told not to decompress), do not try to untar it + if decompress { + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . + tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + if err := chrootarchive.UntarPath(origPath, tarDest); err == nil { + return nil + } else if err != io.EOF { + log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) + } + } + + if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { + return err + } + if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil { + return err + } + + resPath := destPath + if destExists && destStat.IsDir() { + resPath = path.Join(destPath, path.Base(origPath)) + } + + return fixPermissions(resPath, 0, 0) +} + +func copyAsDirectory(source, destination string, destinationExists bool) error { + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + + if destinationExists { + files, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + for _, file := range files { + if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { + return err + } + } + return nil + } + + return fixPermissions(destination, 0, 0) +} + +func fixPermissions(destination string, uid, gid int) error { + return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { + if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { + return err + } + return nil + }) +} + +func (b *Builder) clearTmp() { + for c := range b.TmpContainers { + tmp := b.Daemon.Get(c) + if err := b.Daemon.Destroy(tmp); err != nil { + fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + return + } + b.Daemon.DeleteVolumes(tmp.VolumePaths()) + delete(b.TmpContainers, c) + fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + } +} diff --git a/builder/job.go b/builder/job.go new file mode 100644 index 00000000..555232c9 --- /dev/null +++ b/builder/job.go @@ -0,0 +1,130 @@ +package builder + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "strings" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +type BuilderJob struct { + Engine *engine.Engine + Daemon *daemon.Daemon +} + +func (b *BuilderJob) Install() { + b.Engine.Register("build", b.CmdBuild) +} + +func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { + if len(job.Args) != 0 { + return job.Errorf("Usage: %s\n", job.Name) + } + var ( + remoteURL = job.Getenv("remote") + repoName = job.Getenv("t") + suppressOutput = job.GetenvBool("q") + 
noCache = job.GetenvBool("nocache") + rm = job.GetenvBool("rm") + forceRm = job.GetenvBool("forcerm") + authConfig = &registry.AuthConfig{} + configFile = &registry.ConfigFile{} + tag string + context io.ReadCloser + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) + + repoName, tag = parsers.ParseRepositoryTag(repoName) + if repoName != "" { + if _, _, err := registry.ResolveRepositoryName(repoName); err != nil { + return job.Error(err) + } + if len(tag) > 0 { + if err := graph.ValidateTagName(tag); err != nil { + return job.Error(err) + } + } + } + + if remoteURL == "" { + context = ioutil.NopCloser(job.Stdin) + } else if utils.IsGIT(remoteURL) { + if !strings.HasPrefix(remoteURL, "git://") { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return job.Errorf("Error trying to use git: %s (%s)", err, output) + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + context = c + } else if utils.IsURL(remoteURL) { + f, err := utils.Download(remoteURL) + if err != nil { + return job.Error(err) + } + defer f.Body.Close() + dockerFile, err := ioutil.ReadAll(f.Body) + if err != nil { + return job.Error(err) + } + c, err := archive.Generate("Dockerfile", string(dockerFile)) + if err != nil { + return job.Error(err) + } + context = c + } + defer context.Close() + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + + builder := &Builder{ + Daemon: b.Daemon, + Engine: b.Engine, + OutStream: &utils.StdoutFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + ErrStream: &utils.StderrFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + Verbose: !suppressOutput, + UtilizeCache: !noCache, + Remove: rm, + ForceRemove: forceRm, + OutOld: job.Stdout, + StreamFormatter: sf, + AuthConfig: authConfig, + AuthConfigFile: configFile, + } + + id, err := builder.Run(context) + if err != nil { + return job.Error(err) + } + + if repoName != "" { + b.Daemon.Repositories().Set(repoName, tag, id, false) + } + return engine.StatusOK +} diff --git a/builder/parser/dumper/main.go b/builder/parser/dumper/main.go new file mode 100644 index 00000000..33202b70 --- /dev/null +++ b/builder/parser/dumper/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + + ast, err := parser.Parse(f) + if err != nil { + panic(err) + } else { + fmt.Println(ast.Dump()) + } + } +} diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go new file mode 100644 index 00000000..358e2f73 --- /dev/null +++ b/builder/parser/line_parsers.go @@ -0,0 +1,155 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable.
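The dispatch idea that comment describes can be reduced to a self-contained sketch; this editor's example is separate from the patch, and miniDispatch and node are illustrative names only, not identifiers from the imported source.

package main

import (
	"fmt"
	"strings"
)

type node struct {
	value string
	next  *node
}

// miniDispatch maps an instruction name to its argument parser, mirroring
// the shape of the real dispatch table in parser.go.
var miniDispatch = map[string]func(rest string) *node{
	// USER-style instructions keep the whole argument as one value.
	"user": func(rest string) *node { return &node{value: rest} },
	// EXPOSE-style instructions become a linked list of whitespace fields.
	"expose": func(rest string) *node {
		var root, prev *node
		for _, f := range strings.Fields(rest) {
			n := &node{value: f}
			if prev == nil {
				root = n
			} else {
				prev.next = n
			}
			prev = n
		}
		return root
	},
}

func main() {
	for n := miniDispatch["expose"]("8080 8443"); n != nil; n = n.next {
		fmt.Println(n.value) // prints 8080, then 8443
	}
}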
+ +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" +) + +var ( + errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string) (*Node, map[string]bool, error) { + _, child, err := parseLine(rest) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseEnv(rest string) (*Node, map[string]bool, error) { + node := &Node{} + rootnode := node + strs := TOKEN_WHITESPACE.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf("ENV must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { + node := &Node{} + rootnode := node + prevnode := node + for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parseString just wraps the string in quotes and returns a working node. +func parseString(rest string) (*Node, map[string]bool, error) { + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string) (*Node, map[string]bool, error) { + var ( + myJson []interface{} + next = &Node{} + orignext = next + prevnode = next + ) + + if err := json.Unmarshal([]byte(rest), &myJson); err != nil { + return nil, nil, err + } + + for _, str := range myJson { + switch str.(type) { + case string: + case float64: + str = strconv.FormatFloat(str.(float64), 'G', -1, 64) + default: + return nil, nil, errDockerfileJSONNesting + } + next.Value = str.(string) + next.Next = &Node{} + prevnode = next + next = next.Next + } + + prevnode.Next = nil + + return orignext, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { + rest = strings.TrimSpace(rest) + + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileJSONNesting { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string.
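The "maybe JSON" fallback described above reduces to a small runnable fragment. This editor's sketch is separate from the patch and deliberately simplified: the real parseJSON also converts numeric elements to strings and treats nested arrays as a hard error rather than falling back.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// maybeJSON: a valid JSON array becomes individual arguments (exec form);
// anything else stays one shell-form string.
func maybeJSON(rest string) []string {
	rest = strings.TrimSpace(rest)
	var arr []string
	if err := json.Unmarshal([]byte(rest), &arr); err == nil {
		return arr // exec form, e.g. CMD ["echo", "hi"]
	}
	return []string{rest} // shell form, e.g. CMD echo hi
}

func main() {
	fmt.Printf("%q\n", maybeJSON(`["echo", "hi"]`)) // ["echo" "hi"]
	fmt.Printf("%q\n", maybeJSON(`echo hi`))        // ["echo hi"]
}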
+func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { + rest = strings.TrimSpace(rest) + + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileJSONNesting { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest) +} diff --git a/builder/parser/parser.go b/builder/parser/parser.go new file mode 100644 index 00000000..6b0ab7ab --- /dev/null +++ b/builder/parser/parser.go @@ -0,0 +1,140 @@ +// This package implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "io" + "regexp" + "strings" + "unicode" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing +} + +var ( + dispatch map[string]func(string) (*Node, map[string]bool, error) + TOKEN_WHITESPACE = regexp.MustCompile(`[\t\v\f\r ]+`) + TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\\s*$`) + TOKEN_COMMENT = regexp.MustCompile(`^#.*$`) +) + +func init() { + // Dispatch Table. See line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string) (*Node, map[string]bool, error){ + "user": parseString, + "onbuild": parseSubCommand, + "workdir": parseString, + "env": parseEnv, + "maintainer": parseString, + "from": parseString, + "add": parseStringsWhitespaceDelimited, + "copy": parseStringsWhitespaceDelimited, + "run": parseMaybeJSON, + "cmd": parseMaybeJSON, + "entrypoint": parseMaybeJSON, + "expose": parseStringsWhitespaceDelimited, + "volume": parseMaybeJSONToList, + "insert": parseIgnore, + } +} + +// parse a line and return the remainder. +func parseLine(line string) (string, *Node, error) { + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if TOKEN_LINE_CONTINUATION.MatchString(line) { + line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args) + if err != nil { + return "", nil, err + } + + if sexp.Value != "" || sexp.Next != nil || sexp.Children != nil { + node.Next = sexp + } + + node.Attributes = attrs + node.Original = line + + return "", node, nil +} + +// The main parse routine. Reads a Dockerfile from an io.Reader and returns +// the root of the AST.
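Before the function itself, a self-contained editor's sketch (separate from the patch) of the accumulation loop it implements: a trailing backslash makes parseLine hand back a partial line, and the scanner keeps appending until a full statement parses.

package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

// contRE matches the same trailing-backslash token the parser calls
// TOKEN_LINE_CONTINUATION.
var contRE = regexp.MustCompile(`\\\s*$`)

func main() {
	src := "RUN echo hello \\\n    world\nEXPOSE 80\n"
	sc := bufio.NewScanner(strings.NewReader(src))
	pending := ""
	for sc.Scan() {
		line := pending + strings.TrimSpace(sc.Text())
		if contRE.MatchString(line) {
			// Trailing backslash: strip it and keep accumulating.
			pending = contRE.ReplaceAllString(line, "")
			continue
		}
		pending = ""
		fmt.Printf("statement: %q\n", line)
	}
	// Prints:
	// statement: "RUN echo hello world"
	// statement: "EXPOSE 80"
}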
+func Parse(rwc io.Reader) (*Node, error) { + root := &Node{} + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) + if stripComments(scannedLine) == "" { + continue + } + + line, child, err := parseLine(scannedLine) + if err != nil { + return nil, err + } + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = parseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + } + + if child != nil { + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/builder/parser/parser_test.go b/builder/parser/parser_test.go new file mode 100644 index 00000000..1b517fcc --- /dev/null +++ b/builder/parser/parser_test.go @@ -0,0 +1,82 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" + +func getDirs(t *testing.T, dir string) []os.FileInfo { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdir(0) + if err != nil { + t.Fatal(err) + } + + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error()) + } + + _, err = Parse(df) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s", dir.Name()) + } + + df.Close() + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile") + resultfile := filepath.Join(testDir, dir.Name(), "result") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error()) + } + + rf, err := os.Open(resultfile) + if err != nil { + t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error()) + } + + ast, err := Parse(df) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error()) + } + + content, err := ioutil.ReadAll(rf) + if err != nil { + t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error()) + } + + if ast.Dump()+"\n" != string(content) { + fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) + fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) + t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name()) + } + + df.Close() + rf.Close() + } +} diff --git a/builder/parser/testfiles-negative/env_equals_env/Dockerfile b/builder/parser/testfiles-negative/env_equals_env/Dockerfile new file mode 100644 index 00000000..08675148 --- /dev/null +++ b/builder/parser/testfiles-negative/env_equals_env/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH=PATH diff --git a/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile b/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile new file mode 100644 index 00000000..90531a4b --- /dev/null +++ b/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile @@ -0,0 +1,2 @@ + + diff --git a/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 00000000..d1be4596 --- 
/dev/null +++ b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/builder/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 00000000..5c75a2e0 --- /dev/null +++ b/builder/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,25 @@ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/parser/testfiles/brimstone-consuldock/result b/builder/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 00000000..cc8fab21 --- /dev/null +++ b/builder/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH") diff --git a/builder/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 00000000..25ae3521 --- /dev/null +++ b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get 
clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/parser/testfiles/brimstone-docker-consul/result b/builder/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 00000000..8c989e62 --- /dev/null +++ b/builder/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/hashicorp/consul && mv $GOPATH/bin/consul /usr/bin/consul && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH") diff --git a/builder/parser/testfiles/continueIndent/Dockerfile b/builder/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 00000000..42b324e7 --- /dev/null +++ b/builder/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/builder/parser/testfiles/continueIndent/result b/builder/parser/testfiles/continueIndent/result new file mode 100644 index 00000000..268ae073 --- /dev/null +++ b/builder/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/builder/parser/testfiles/cpuguy83-nagios/Dockerfile b/builder/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 00000000..8ccb71a5 --- /dev/null +++ b/builder/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV 
NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/builder/parser/testfiles/cpuguy83-nagios/result b/builder/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 00000000..25dd3ddf --- /dev/null +++ 
b/builder/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" 
"/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile new file mode 100644 index 00000000..fba1d8b9 --- /dev/null +++ b/builder/parser/testfiles/docker/Dockerfile @@ -0,0 +1,105 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: Apparmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +docker-version 0.6.1 +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add 
an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result new file mode 100644 index 00000000..db74e869 --- /dev/null +++ b/builder/parser/testfiles/docker/result @@ -0,0 +1,25 @@ +(docker-version) +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff --git a/builder/parser/testfiles/escapes/Dockerfile b/builder/parser/testfiles/escapes/Dockerfile new file mode 100644 index 00000000..1ffb17ef --- /dev/null +++ b/builder/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/escapes/result b/builder/parser/testfiles/escapes/result new file mode 100644 index 00000000..13e409cb --- /dev/null +++ b/builder/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/builder/parser/testfiles/influxdb/Dockerfile b/builder/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 00000000..587fb9b5 --- /dev/null +++ b/builder/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/builder/parser/testfiles/influxdb/result b/builder/parser/testfiles/influxdb/result new file mode 100644 index 00000000..0998e87e --- /dev/null +++ b/builder/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 00000000..39fe27d9 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 00000000..afc220c2 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 00000000..eaae081a --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 
00000000..484804e2 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 00000000..c3ac63c0 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 00000000..61478912 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 00000000..5fd4afa5 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 00000000..1ffbb8ff --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 00000000..30cc4bb4 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] 
diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 00000000..32048147 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 00000000..35f9c24a --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/result b/builder/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 00000000..b5ac6fe4 --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 00000000..188395fe --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 00000000..6f7d57a3 --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." 
"copy") diff --git a/builder/parser/testfiles/mail/Dockerfile b/builder/parser/testfiles/mail/Dockerfile new file mode 100644 index 00000000..f64c1168 --- /dev/null +++ b/builder/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/builder/parser/testfiles/mail/result b/builder/parser/testfiles/mail/result new file mode 100644 index 00000000..a0efcf04 --- /dev/null +++ b/builder/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/builder/parser/testfiles/multiple-volumes/Dockerfile b/builder/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 00000000..57bb5976 --- /dev/null +++ b/builder/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/builder/parser/testfiles/multiple-volumes/result b/builder/parser/testfiles/multiple-volumes/result new file mode 100644 index 00000000..18dbdeea --- /dev/null +++ b/builder/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/builder/parser/testfiles/mumble/Dockerfile b/builder/parser/testfiles/mumble/Dockerfile new file mode 100644 index 00000000..5b9ec06a --- /dev/null +++ b/builder/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/builder/parser/testfiles/mumble/result b/builder/parser/testfiles/mumble/result new file mode 100644 index 00000000..a0036a94 --- /dev/null +++ b/builder/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/builder/parser/testfiles/nginx/Dockerfile b/builder/parser/testfiles/nginx/Dockerfile new file mode 100644 index 00000000..bf8368e1 --- /dev/null +++ b/builder/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/builder/parser/testfiles/nginx/result b/builder/parser/testfiles/nginx/result new file mode 100644 index 00000000..56ddb6f2 --- /dev/null +++ b/builder/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from 
"ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/builder/parser/testfiles/tf2/Dockerfile b/builder/parser/testfiles/tf2/Dockerfile new file mode 100644 index 00000000..72b79bdd --- /dev/null +++ b/builder/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/parser/testfiles/tf2/result b/builder/parser/testfiles/tf2/result new file mode 100644 index 00000000..d4f94cd8 --- /dev/null +++ b/builder/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/builder/parser/testfiles/weechat/Dockerfile b/builder/parser/testfiles/weechat/Dockerfile new file mode 100644 index 00000000..48420881 --- /dev/null +++ b/builder/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" 
>/.zshenv + +CMD zsh -c weechat diff --git a/builder/parser/testfiles/weechat/result b/builder/parser/testfiles/weechat/result new file mode 100644 index 00000000..c3abb4c5 --- /dev/null +++ b/builder/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/builder/parser/testfiles/znc/Dockerfile b/builder/parser/testfiles/znc/Dockerfile new file mode 100644 index 00000000..3a4da6e9 --- /dev/null +++ b/builder/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/znc/result b/builder/parser/testfiles/znc/result new file mode 100644 index 00000000..5493b255 --- /dev/null +++ b/builder/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/builder/parser/utils.go b/builder/parser/utils.go new file mode 100644 index 00000000..096c4e31 --- /dev/null +++ b/builder/parser/utils.go @@ -0,0 +1,94 @@ +package parser + +import ( + "fmt" + "strings" +) + +// QuoteString walks characters (after trimming), escapes any quotes and +// escapes, then wraps the whole thing in quotes. Very useful for generating +// argument output in nodes. +func QuoteString(str string) string { + result := "" + chars := strings.Split(strings.TrimSpace(str), "") + + for _, char := range chars { + switch char { + case `"`: + result += `\"` + case `\`: + result += `\\` + default: + result += char + } + } + + return `"` + result + `"` +} + +// dumps the AST defined by `node` as a list of sexps. Returns a string +// suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + QuoteString(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, string, error) { + cmdline := TOKEN_WHITESPACE.Split(line, 2) + + if len(cmdline) != 2 { + return "", "", fmt.Errorf("We do not understand this file. Please ensure it is a valid Dockerfile. Parser error at %q", line) + } + + cmd := strings.ToLower(cmdline[0]) + // the cmd should never have whitespace, but it's possible for the args to + // have trailing whitespace. + return cmd, strings.TrimSpace(cmdline[1]), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. 
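The QuoteString/Dump pair above is what produces the sexp "result" fixtures that parser_test.go compares against. A self-contained editor's sketch of the quoting rule, separate from the patch (quote is an illustrative name, not an identifier from the imported source):

package main

import (
	"fmt"
	"strings"
)

// quote mirrors QuoteString's escaping rules: escape backslashes and double
// quotes, then wrap the whole (trimmed) string in double quotes.
func quote(s string) string {
	result := ""
	for _, char := range strings.Split(strings.TrimSpace(s), "") {
		switch char {
		case `"`:
			result += `\"`
		case `\`:
			result += `\\`
		default:
			result += char
		}
	}
	return `"` + result + `"`
}

func main() {
	fmt.Println(quote(`echo "hi" c:\tmp`)) // prints: "echo \"hi\" c:\\tmp"
}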
+func stripComments(line string) string { + // string is already trimmed at this point + if TOKEN_COMMENT.MatchString(line) { + return TOKEN_COMMENT.ReplaceAllString(line, "") + } + + return line +} diff --git a/builder/support.go b/builder/support.go new file mode 100644 index 00000000..6833457f --- /dev/null +++ b/builder/support.go @@ -0,0 +1,59 @@ +package builder + +import ( + "regexp" + "strings" +) + +var ( + // `\\\\+|[^\\]|\b|\A` - match any number of "\\" (ie, properly-escaped backslashes), or a single non-backslash character, or a word boundary, or beginning-of-line + // `\$` - match literal $ + // `[[:alnum:]_]+` - match things like `$SOME_VAR` + // `{[[:alnum:]_]+}` - match things like `${SOME_VAR}` + tokenEnvInterpolation = regexp.MustCompile(`(\\|\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`) + // this intentionally punts on more exotic interpolations like ${SOME_VAR%suffix} and lets the shell handle those directly +) + +// handle environment replacement. Used in dispatcher. +func (b *Builder) replaceEnv(str string) string { + for _, match := range tokenEnvInterpolation.FindAllString(str, -1) { + idx := strings.Index(match, "\\$") + if idx != -1 { + if idx+2 >= len(match) { + str = strings.Replace(str, match, "\\$", -1) + continue + } + + prefix := match[:idx] + stripped := match[idx+2:] + str = strings.Replace(str, match, prefix+"$"+stripped, -1) + continue + } + + match = match[strings.Index(match, "$"):] + matchKey := strings.Trim(match, "${}") + + for _, keyval := range b.Config.Env { + tmp := strings.SplitN(keyval, "=", 2) + if tmp[0] == matchKey { + str = strings.Replace(str, match, tmp[1], -1) + break + } + } + } + + return str +} + +func handleJsonArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/builtins/builtins.go b/builtins/builtins.go new file mode 100644 index 00000000..41bb2492 --- /dev/null +++ b/builtins/builtins.go @@ -0,0 +1,75 @@ +package builtins + +import ( + "runtime" + + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/daemon/networkdriver/bridge" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/events" + "github.com/docker/docker/pkg/parsers/kernel" +) + +func Register(eng *engine.Engine) error { + if err := daemon(eng); err != nil { + return err + } + if err := remote(eng); err != nil { + return err + } + if err := events.New().Install(eng); err != nil { + return err + } + if err := eng.Register("version", dockerVersion); err != nil { + return err + } + + return nil +} + +// remote: a RESTful api for cross-docker communication +func remote(eng *engine.Engine) error { + if err := eng.Register("serveapi", apiserver.ServeApi); err != nil { + return err + } + return eng.Register("acceptconnections", apiserver.AcceptConnections) +} + +// daemon: a default execution and storage backend for Docker on Linux, +// with the following underlying components: +// +// * Pluggable storage drivers including aufs, vfs, lvm and btrfs. +// * Pluggable execution drivers including lxc and chroot. 
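+//
+// Each backend above plugs in the same way: it registers named jobs on the
+// engine. A minimal sketch of that pattern (the "echo" job name is
+// hypothetical, shown only to illustrate the Handler signature; compare
+// dockerVersion below):
+//
+//	eng.Register("echo", func(job *engine.Job) engine.Status {
+//		fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " "))
+//		return engine.StatusOK
+//	})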
+// +// In practice `daemon` still includes most core Docker components, including: +// +// * The reference registry client implementation +// * Image management +// * The build facility +// * Logging +// +// These components should be broken off into plugins of their own. +// +func daemon(eng *engine.Engine) error { + return eng.Register("init_networkdriver", bridge.InitDriver) +} + +// builtins jobs independent of any subsystem +func dockerVersion(job *engine.Job) engine.Status { + v := &engine.Env{} + v.SetJson("Version", dockerversion.VERSION) + v.SetJson("ApiVersion", api.APIVERSION) + v.SetJson("GitCommit", dockerversion.GITCOMMIT) + v.Set("GoVersion", runtime.Version()) + v.Set("Os", runtime.GOOS) + v.Set("Arch", runtime.GOARCH) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + v.Set("KernelVersion", kernelVersion.String()) + } + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/contrib/MAINTAINERS b/contrib/MAINTAINERS new file mode 100644 index 00000000..18e05a30 --- /dev/null +++ b/contrib/MAINTAINERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/contrib/README b/contrib/README new file mode 100644 index 00000000..92b1d944 --- /dev/null +++ b/contrib/README @@ -0,0 +1,4 @@ +The `contrib` directory contains scripts, images, and other helpful things +which are not part of the core docker distribution. Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. diff --git a/contrib/check-config.sh b/contrib/check-config.sh new file mode 100755 index 00000000..afaabbc9 --- /dev/null +++ b/contrib/check-config.sh @@ -0,0 +1,171 @@ +#!/usr/bin/env bash +set -e + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) +: ${CONFIG:="${possibleConfigs[0]}"} + +if ! command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} + +# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors +declare -A colors=( + [black]=30 + [red]=31 + [green]=32 + [yellow]=33 + [blue]=34 + [magenta]=35 + [cyan]=36 + [white]=37 +) +color() { + color=() + if [ "$1" = 'bold' ]; then + color+=( '1' ) + shift + fi + if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then + color+=( "${colors[$1]}" ) + fi + local IFS=';' + echo -en '\033['"${color[*]}"m +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set "$1"; then + wrap_good "CONFIG_$1" 'enabled' + else + wrap_bad "CONFIG_$1" 'missing' + fi +} + +check_flags() { + for flag in "$@"; do + echo "- $(check_flag "$flag")" + done +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! 
-e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + DEVPTS_MULTIPLE_INSTANCES + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED + MACVLAN VETH BRIDGE + NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} + NF_NAT NF_NAT_NEEDED +) +check_flags "${flags[@]}" +echo + +echo 'Optional Features:' +flags=( + MEMCG_SWAP + RESOURCE_COUNTERS + CGROUP_PERF +) +check_flags "${flags[@]}" + +echo '- Storage Drivers:' +{ + echo '- "'$(wrap_color 'aufs' blue)'":' + check_flags AUFS_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' + if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" + fi + + echo '- "'$(wrap_color 'btrfs' blue)'":' + check_flags BTRFS_FS | sed 's/^/ /' + + echo '- "'$(wrap_color 'devicemapper' blue)'":' + check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' +} | sed 's/^/ /' +echo + +#echo 'Potential Future Features:' +#check_flags USER_NS +#echo diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker new file mode 100755 index 00000000..cc16d482 --- /dev/null +++ b/contrib/completion/bash/docker @@ -0,0 +1,780 @@ +#!bash +# +# bash completion file for core docker commands +# +# This script provides supports completion of: +# - commands and their options +# - container ids and names +# - image repos and tags +# - filepaths +# +# To enable the completions either: +# - place this file in /etc/bash_completion.d +# or +# - copy this file and add the line below to your .bashrc after +# bash completion features are loaded +# . 
docker.bash +# +# Note: +# Currently, the completions will not work if the docker daemon is not +# bound to the default communication port/socket +# If the docker daemon is using a unix socket for communication your user +# must have access to the socket for the completions to function correctly + +__docker_q() { + docker 2>/dev/null "$@" +} + +__docker_containers_all() { + local IFS=$'\n' + local containers=( $(__docker_q ps -aq --no-trunc) ) + if [ "$1" ]; then + containers=( $(__docker_q inspect --format "{{if $1}}{{.Id}}{{end}}" "${containers[@]}") ) + fi + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + unset IFS + COMPREPLY=( $(compgen -W "${names[*]} ${containers[*]}" -- "$cur") ) +} + +__docker_containers_running() { + __docker_containers_all '.State.Running' +} + +__docker_containers_stopped() { + __docker_containers_all 'not .State.Running' +} + +__docker_containers_pauseable() { + __docker_containers_all 'and .State.Running (not .State.Paused)' +} + +__docker_containers_unpauseable() { + __docker_containers_all '.State.Paused' +} + +__docker_image_repos() { + local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" + COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) +} + +__docker_image_repos_and_tags() { + local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" + COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_image_repos_and_tags_and_ids() { + local images="$(__docker_q images -a --no-trunc | awk 'NR>1 { print $3; if ($1 != "") { print $1; print $1":"$2 } }')" + COMPREPLY=( $(compgen -W "$images" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_containers_and_images() { + __docker_containers_all + local containers=( "${COMPREPLY[@]}" ) + __docker_image_repos_and_tags_and_ids + COMPREPLY+=( "${containers[@]}" ) +} + +__docker_pos_first_nonflag() { + local argument_flags=$1 + + local counter=$cpos + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then + (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + (( counter++ )) + done + + echo $counter +} + +_docker_docker() { + case "$prev" in + -H) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-H" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + ;; + esac +} + +_docker_attach() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) ) + ;; + *) + local counter="$(__docker_pos_first_nonflag)" + if [ $cword -eq $counter ]; then + __docker_containers_running + fi + ;; + esac +} + +_docker_build() { + case "$prev" in + -t|--tag) + __docker_image_repos_and_tags + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm --force-rm" -- "$cur" ) ) + ;; + *) + local counter="$(__docker_pos_first_nonflag '-t|--tag')" + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_commit() { + case "$prev" in + -m|--message|-a|--author|--run) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run') + + if [ $cword -eq $counter 
]; then + __docker_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_cp() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + __docker_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + _filedir + return + fi +} + +_docker_create() { + case "$prev" in + -a|--attach) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cidfile|--env-file) + _filedir + return + ;; + --volumes-from) + __docker_containers_all + return + ;; + -v|--volume) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + compopt -o nospace + ;; + /*) + _filedir + compopt -o nospace + ;; + esac + return + ;; + -e|--env) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + compopt -o nospace + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac +} + +_docker_diff() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi +} + +_docker_events() { + case "$prev" in + --since) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--since" -- "$cur" ) ) + ;; + *) + ;; + esac +} + +_docker_exec() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-d --detach -i --interactive -t --tty" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_export() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + +_docker_history() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac +} + +_docker_images() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos + fi + ;; + esac +} + +_docker_import() { + local 
counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi +} + +_docker_info() { + return +} + +_docker_inspect() { + case "$prev" in + -f|--format) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) ) + ;; + *) + __docker_containers_and_images + ;; + esac +} + +_docker_kill() { + __docker_containers_running +} + +_docker_load() { + return +} + +_docker_login() { + case "$prev" in + -u|--username|-p|--password|-e|--email) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) ) + ;; + *) + ;; + esac +} + +_docker_logs() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac +} + +_docker_pause() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_pauseable + fi +} + +_docker_port() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi +} + +_docker_ps() { + case "$prev" in + --since|--before) + __docker_containers_all + ;; + -n) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) ) + ;; + *) + ;; + esac +} + +_docker_pull() { + case "$prev" in + -t|--tag) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '-t|--tag') + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + fi + ;; + esac +} + +_docker_push() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + fi +} + +_docker_restart() { + case "$prev" in + -t|--time) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) ) + ;; + *) + __docker_containers_all + ;; + esac +} + +_docker_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-f --force -l --link -v --volumes" -- "$cur" ) ) + return + ;; + *) + local force= + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + -f|--force) + __docker_containers_all + return + ;; + esac + done + __docker_containers_stopped + return + ;; + esac +} + +_docker_rmi() { + __docker_image_repos_and_tags_and_ids +} + +_docker_run() { + case "$prev" in + -a|--attach) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cidfile|--env-file) + _filedir + return + ;; + --volumes-from) + __docker_containers_all + return + ;; + -v|--volume) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + compopt -o nospace + ;; + /*) + _filedir + compopt -o nospace + ;; + esac + return + ;; + -e|--env) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + compopt -o nospace + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + ;; + esac + return + ;; + 
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) + ;; + *) + + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt') + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac +} + +_docker_save() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi +} + +_docker_search() { + case "$prev" in + -s|--stars) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) ) + ;; + *) + ;; + esac +} + +_docker_start() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) ) + ;; + *) + __docker_containers_stopped + ;; + esac +} + +_docker_stop() { + case "$prev" in + -t|--time) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_unpause() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_unpauseable + fi +} + +_docker_top() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_running + fi +} + +_docker_version() { + return +} + +_docker_wait() { + __docker_containers_all +} + +_docker() { + local commands=( + attach + build + commit + cp + create + diff + events + exec + export + history + images + import + info + insert + inspect + kill + load + login + logs + pause + port + ps + pull + push + restart + rm + rmi + run + save + search + start + stop + tag + top + unpause + version + wait + ) + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + -H) + (( counter++ )) + ;; + -*) + ;; + *) + command="${words[$counter]}" + cpos=$counter + (( cpos++ )) + break + ;; + esac + (( counter++ )) + done + + local completions_func=_docker_${command} + declare -F $completions_func >/dev/null && $completions_func + + return 0 +} + +complete -F _docker docker diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish new file mode 100644 index 00000000..48b0279c --- /dev/null +++ b/contrib/completion/fish/docker.fish @@ -0,0 +1,285 @@ +# docker.fish - docker completions for fish shell +# +# This file is generated by gen_docker_fish_completions.py 
from:
+# https://github.com/barnybug/docker-fish-completion
+#
+# To install the completions:
+#     mkdir -p ~/.config/fish/completions
+#     cp docker.fish ~/.config/fish/completions
+#
+# Completion supported:
+# - parameters
+# - commands
+# - containers
+# - images
+# - repositories
+
+function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
+    for i in (commandline -opc)
+        if contains -- $i attach build commit cp create diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait
+            return 1
+        end
+    end
+    return 0
+end
+
+function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
+    switch $select
+        case running
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+        case stopped
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+        case all
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+    end
+end
+
+function __fish_print_docker_images --description 'Print a list of docker images'
+    docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
+end
+
+function __fish_print_docker_repositories --description 'Print a list of docker repositories'
+    docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
+end
+
+# common options
+complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group"
+complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode.
Multiple sockets can be specified' +complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API' +complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking" +complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" +complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' + +# subcommands +# attach +complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" + +# build +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' + +# commit +complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" 
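+# (Note the two-step pattern used for every subcommand: one `complete` line
+# gated on __fish_docker_no_subcommand offers the subcommand itself, and the
+# `__fish_seen_subcommand_from` lines below it offer that subcommand's flags
+# and arguments once it is on the command line.)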
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith "'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
+
+# cp
+complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path"
+
+# create
+complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to stdin, stdout or stderr.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s c -l cpu-shares -d 'CPU shares (relative weight)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom dns servers'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default entrypoint of the image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port from the container without publishing it to your host'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep stdin open even if not attached'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container (name:alias)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s n -l networking -d 'Enable networking for this container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)"
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-tty'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image"
+
+
+# diff
+complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
+complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container"
+
+# events
+complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server'
+complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show previously created events and then stream.'
+
+# export
+complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive'
+complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container"
+
+# history
+complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
+complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
+complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
+complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
+
+# images
+complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
+
+# import
+complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball'
+
+# info
+complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information'
+
+# inspect
+complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
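+# (e.g. `docker inspect --format '{{.NetworkSettings.IPAddress}}' CONTAINER`
+# prints only the container's IP address)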
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port which is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' 
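+# (e.g. `docker ps -a -q` prints just the IDs of every container, running or
+# stopped, which is handy as input to `docker rm`)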
+ +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s t -l tag -d 'Download tagged image in repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to the docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: Run container in the background, print new container id' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + +# save +complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" + +# search +complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' + +# start +complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's stdout/stderr and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's stdin" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# stop +complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it.' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -a '(__fish_print_docker_images)' -d "Image" + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the docker version information' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" + + diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker new file mode 100644 index 00000000..aff59ee7 --- /dev/null +++ b/contrib/completion/zsh/_docker @@ -0,0 +1,471 @@ +#compdef docker +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +__docker_get_containers() { + local kind expl + declare -a running stopped lines args + + kind=$1 + shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)"$(_call_program commands docker ps ${args})"}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( $j < ${#header} - 1 )) { + i=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 1)) + j=$(( $i + ${${header[$i,-1]}[(i) ]} - 1)) + k=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 2)) + begin[${header[$i,$(($j-1))]}]=$i + end[${header[$i,$(($j-1))]}]=$k + } + lines=(${lines[2,-1]}) + + # Container ID + local line + local s + for line in $lines; do + s="${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + + # Names + local name + local -a names + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}}) + for name in $names; do + s="${name}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + done + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped +} + +__docker_stoppedcontainers() { + __docker_get_containers stopped "$@" +} + +__docker_runningcontainers() { + __docker_get_containers running "$@" +} + +__docker_containers () { + __docker_get_containers all "$@" +} + +__docker_images () { + local expl + declare -a images + images=(${${${${(f)"$(_call_program commands docker images)"}[2,-1]}/ ##/\\:}%% *}) + images=(${${images%\\:}#} ${${${(f)"$(_call_program commands docker images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ 
]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images +} + +__docker_tags() { + local expl + declare -a tags + tags=(${${${${${(f)"$(_call_program commands docker images)"}#* }## #}%% *}[2,-1]}) + _describe -t docker-tags "tags" tags +} + +__docker_repositories_with_tags() { + if compset -P '*:'; then + __docker_tags + else + __docker_repositories -qS ":" + fi +} + +__docker_search() { + # declare -a dockersearch + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..." + result=(${${${(f)"$(_call_program commands docker search ${searchterm})"}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_caching_policy() +{ + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + + +__docker_repositories () { + local expl + declare -a repos + repos=(${${${(f)"$(_call_program commands docker images)"}%% *}[2,-1]}) + _describe -t docker-repos "repositories" repos "$@" +} + +__docker_commands () { + # local -a _docker_subcommands + local cache_policy + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ + && ! 
_retrieve_cache docker_subcommands; + then + _docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'help:Show help for a command') + _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand () { + local -a _command_args + case "$words[1]" in + (attach) + _arguments \ + '--no-stdin[Do not attach stdin]' \ + '--sig-proxy[Proxify all received signal]' \ + ':containers:__docker_runningcontainers' + ;; + (build) + _arguments \ + '--force-rm[Always remove intermediate containers, even after unsuccessful builds]' \ + '--no-cache[Do not use cache when building the image]' \ + '-q[Suppress verbose build output]' \ + '--rm[Remove intermediate containers after a successful build]' \ + '-t:repository:__docker_repositories_with_tags' \ + ':path or URL:_directories' + ;; + (commit) + _arguments \ + '--author=-[Author]:author: ' \ + '-m[Commit message]:message: ' \ + '--run=-[Configuration automatically applied when the image is run]:configuration: ' \ + ':container:__docker_containers' \ + ':repository:__docker_repositories_with_tags' + ;; + (cp) + _arguments \ + ':container:->container' \ + ':hostpath:_files' + case $state in + (container) + if compset -P '*:'; then + _files + else + __docker_containers -qS ":" + fi + ;; + esac + ;; + (create) + _arguments \ + '-P[Publish all exposed ports to the host]' \ + '-a[Attach to stdin, stdout or stderr]' \ + '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '--cidfile=-[Write the container ID to the file]:CID file:_files' \ + '*--dns=-[Set custom dns servers]:dns server: ' \ + '*-e=-[Set environment variables]:environment variable: ' \ + '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '*--expose=-[Expose a port from the container without publishing it]: ' \ + '-h=-[Container host name]:hostname:_hosts' \ + '-i[Keep stdin open even if not attached]' \ + '--link=-[Add link to another container]:link:->link' \ + '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ + '-m=-[Memory limit (in bytes)]:limit: ' \ + '--name=-[Container name]:name: ' \ + '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \ + '--privileged[Give extended privileges to this container]' \ + '-t[Allocate a pseudo-tty]' \ + '-u=-[Username or UID]:user:_users' \ + '*-v=-[Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)]:volume: '\ + '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + '-w=-[Working directory inside the container]:directory:_directories' \ + '(-):images:__docker_images' \ + '(-):command: _command_names -e' \ + '*::arguments: _normal' + (diff|export) + _arguments '*:containers:__docker_containers' + ;; + (exec) + _arguments \ + '-d[Detached mode: leave the container running in the background]' \ + '-i[Keep stdin open even if not attached]' \ + '-t[Allocate a pseudo-tty]' \ + ':containers:__docker_runningcontainers' + ;; + (history) + _arguments \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '*:images:__docker_images' + ;; + (images) + _arguments \ + '-a[Show all images]' \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '--tree[Output graph in tree format]' \ + '--viz[Output graph in graphviz format]' \ + ':repository:__docker_repositories' + ;; + (inspect) + _arguments \ + '--format=-[Format the output using the given go template]:template: ' \ + '*:containers:__docker_containers' + ;; + (import) + _arguments \ + ':URL:(- http:// file://)' \ + ':repository:__docker_repositories_with_tags' + ;; + (info) + ;; + (import) + _arguments \ + ':URL:(- http:// file://)' \ + ':repository:__docker_repositories_with_tags' + ;; + (insert) + _arguments '1:containers:__docker_containers' \ + '2:URL:(http:// file://)' \ + '3:file:_files' + ;; + (kill) + _arguments '*:containers:__docker_runningcontainers' + ;; + (load) + ;; + (login) + _arguments \ + '-e[Email]:email: ' \ + '-p[Password]:password: ' \ + '-u[Username]:username: ' \ + ':server: ' + ;; + (logs) + _arguments \ + '-f[Follow log output]' \ + '*:containers:__docker_containers' + ;; + (port) + _arguments \ + '1:containers:__docker_runningcontainers' \ + '2:port:_ports' + ;; + (start) + _arguments \ + '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \ + '-i[Attach container'"'"'s stding]' \ + '*:containers:__docker_stoppedcontainers' + ;; + (rm) + _arguments \ + '--link[Remove the specified link and not the underlying container]' \ + '-v[Remove the volumes associated to the container]' \ + '*:containers:__docker_stoppedcontainers' + ;; + (rmi) + _arguments \ + '*:images:__docker_images' + ;; + (restart|stop) + _arguments '-t[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ + '*:containers:__docker_runningcontainers' + ;; + (top) + _arguments \ + '1:containers:__docker_runningcontainers' \ + '(-)*:: :->ps-arguments' + case $state in + (ps-arguments) + _ps + ;; + esac + + ;; + (ps) + _arguments \ + '-a[Show all containers]' \ + '--before=-[Show only container created before...]:containers:__docker_containers' \ + '-l[Show only the latest created container]' \ + '-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '-s[Display sizes]' \ + '--since=-[Show only containers created since...]:containers:__docker_containers' + ;; + (tag) + _arguments \ + '-f[force]'\ + ':image:__docker_images'\ + ':repository:__docker_repositories_with_tags' + ;; + (run) + _arguments \ + '-P[Publish all exposed ports to the host]' \ + '-a[Attach to stdin, stdout or stderr]' \ + '-c[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '--cidfile=-[Write the container ID to the file]:CID file:_files' \ + '-d[Detached mode: leave the 
container running in the background]' \ + '*--dns=-[Set custom dns servers]:dns server: ' \ + '*-e[Set environment variables]:environment variable: ' \ + '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '*--expose=-[Expose a port from the container without publishing it]: ' \ + '-h[Container host name]:hostname:_hosts' \ + '-i[Keep stdin open even if not attached]' \ + '--link=-[Add link to another container]:link:->link' \ + '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ + '-m[Memory limit (in bytes)]:limit: ' \ + '--name=-[Container name]:name: ' \ + '*-p[Expose a container'"'"'s port to the host]:port:_ports' \ + '--privileged[Give extended privileges to this container]' \ + '--rm[Automatically remove the container when it exits]' \ + '--sig-proxy[Proxy all received signals to the process]' \ + '-t[Allocate a pseudo-tty]' \ + '-u[Username or UID]:user:_users' \ + '*-v[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: ' \ + '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + '-w[Working directory inside the container]:directory:_directories' \ + '(-):images:__docker_images' \ + '(-):command: _command_names -e' \ + '*::arguments: _normal' + + case $state in + (link) + if compset -P '*:'; then + _wanted alias expl 'Alias' compadd -E "" + else + __docker_runningcontainers -qS ":" + fi + ;; + esac + + ;; + (pull|search) + _arguments ':name:__docker_search' + ;; + (push) + _arguments ':images:__docker_images' + ;; + (save) + _arguments \ + ':images:__docker_images' + ;; + (wait) + _arguments ':containers:__docker_runningcontainers' + ;; + (help) + _arguments ':subcommand:__docker_commands' + ;; + (*) + _message 'Unknown subcommand' + esac + +} + +_docker () { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line + typeset -A opt_args + + _arguments -C \ + '-H[tcp://host:port to bind/connect to]:socket: ' \ + '(-): :->command' \ + '(-)*:: :->option-or-argument' + + case $state in + (command) + __docker_commands + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand + ;; + esac +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/contrib/desktop-integration/README.md b/contrib/desktop-integration/README.md new file mode 100644 index 00000000..85a01b9e --- /dev/null +++ b/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration directory contains examples of typical dockerized +desktop applications.
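 + +Both examples follow the same pattern: bind-mount the host X11 unix socket and +pass DISPLAY so the application renders on the host display. A minimal sketch +("app" stands in for an image built from one of the Dockerfiles below): + + docker build -t app . + docker run -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$DISPLAY app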
 + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application with devices diff --git a/contrib/desktop-integration/chromium/Dockerfile b/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 00000000..0e0a7ce9 --- /dev/null +++ b/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,38 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses the native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# DOCKER_VERSION: 1.3 + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile new file mode 100644 index 00000000..6db1d240 --- /dev/null +++ b/contrib/desktop-integration/gparted/Dockerfile @@ -0,0 +1,33 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses the native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . 
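+# +# # Optional sketch: find a host disk to hand to the container first +# # (device names vary per host; pointing gparted at a disk is destructive) +# lsblk -d -o NAME,SIZE,TYPE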
+# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +# DOCKER-VERSION: 1.3 + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go new file mode 100644 index 00000000..23d19f02 --- /dev/null +++ b/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,170 @@ +package main + +import ( + "flag" + "fmt" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "os" + "path" + "sort" + "strconv" + "strings" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + } + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceId) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionId) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) + break + case "resize": + if flag.NArg() < 2 { + usage() + } + + size, err := byteSizeFromString(args[1]) + if err != 
nil { + fmt.Println("Invalid size: ", err) + os.Exit(1) + } + + err = devices.ResizePool(size) + if err != nil { + fmt.Println("Error resizing pool: ", err) + os.Exit(1) + } + + break + case "snap": + if flag.NArg() < 3 { + usage() + } + + err := devices.AddDevice(args[1], args[2]) + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + case "remove": + if flag.NArg() < 2 { + usage() + } + + err := devices.RemoveDevice(args[1]) + if err != nil { + fmt.Println("Can't remove device: ", err) + os.Exit(1) + } + break + case "mount": + if flag.NArg() < 3 { + usage() + } + + err := devices.MountDevice(args[1], args[2], false) + if err != nil { + fmt.Println("Can't mount device: ", err) + os.Exit(1) + } + break + default: + fmt.Printf("Unknown command %s\n", args[0]) + usage() + + os.Exit(1) + } + + return +} diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev new file mode 100644 index 00000000..1c0fbd83 --- /dev/null +++ b/contrib/host-integration/Dockerfile.dev @@ -0,0 +1,27 @@ +# +# This Dockerfile will create an image that allows generating upstart and +# systemd scripts (more to come) +# +# docker-version 0.6.2 +# + +FROM ubuntu:12.10 +MAINTAINER Guillaume J. Charmes + +RUN apt-get update && apt-get install -y wget git mercurial + +# Install Go +RUN wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz +RUN tar -xzvf go-1.1.2.tar.gz && mv /go /goroot +RUN mkdir /go + +ENV GOROOT /goroot +ENV GOPATH /go +ENV PATH $GOROOT/bin:$PATH + +RUN go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3 +ADD manager.go /manager/ +RUN cd /manager && go build -o /usr/bin/manager + +ENTRYPOINT ["/usr/bin/manager"] + diff --git a/contrib/host-integration/Dockerfile.min b/contrib/host-integration/Dockerfile.min new file mode 100644 index 00000000..60bb89b9 --- /dev/null +++ b/contrib/host-integration/Dockerfile.min @@ -0,0 +1,4 @@ +FROM busybox +MAINTAINER Guillaume J. 
Charmes +ADD manager /usr/bin/ +ENTRYPOINT ["/usr/bin/manager"] diff --git a/contrib/host-integration/manager.go b/contrib/host-integration/manager.go new file mode 100644 index 00000000..c0b488b2 --- /dev/null +++ b/contrib/host-integration/manager.go @@ -0,0 +1,130 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "github.com/docker/docker" + "os" + "strings" + "text/template" +) + +var templates = map[string]string{ + + "upstart": `description "{{.description}}" +author "{{.author}}" +start on filesystem and started lxc-net and started docker +stop on runlevel [!2345] +respawn +exec /usr/bin/docker start -a {{.container_id}} +`, + + "systemd": `[Unit] + Description={{.description}} + Author={{.author}} + After=docker.service + +[Service] + Restart=always + ExecStart=/usr/bin/docker start -a {{.container_id}} + ExecStop=/usr/bin/docker stop -t 2 {{.container_id}} + +[Install] + WantedBy=multi-user.target +`, +} + +func main() { + // Parse command line for custom options + kind := flag.String("t", "upstart", "Type of manager requested") + author := flag.String("a", "", "Author of the image") + description := flag.String("d", "", "Description of the image") + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "\nUsage: manager <container_id>\n\n") + flag.PrintDefaults() + } + flag.Parse() + + // We require at least the container ID + if flag.NArg() != 1 { + println(flag.NArg()) + flag.Usage() + return + } + + // Check that the requested process manager is supported + if _, exists := templates[*kind]; !exists { + panic("Unknown script template") + } + + // Load the requested template + tpl, err := template.New("processManager").Parse(templates[*kind]) + if err != nil { + panic(err) + } + + // Create stdout/stderr buffers + bufOut := bytes.NewBuffer(nil) + bufErr := bytes.NewBuffer(nil) + + // Instantiate the Docker CLI + cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil) + // Retrieve the container info + if err := cli.CmdInspect(flag.Arg(0)); err != nil { + // As of docker v0.6.3, CmdInspect always returns nil + panic(err) + } + + // If there is nothing in the error buffer, then the Docker daemon is there and the container has been found + if bufErr.Len() == 0 { + // Unmarshal the resulting container data + c := []*docker.Container{{}} + if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil { + panic(err) + } + // Reset the buffers + bufOut.Reset() + bufErr.Reset() + // Retrieve the info of the linked image + if err := cli.CmdInspect(c[0].Image); err != nil { + panic(err) + } + // If there is nothing in the error buffer, then the image has been found. 
+ if bufErr.Len() == 0 { + // Unmarshal the resulting image data + img := []*docker.Image{{}} + if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil { + panic(err) + } + // If no author has been set, use the one from the image + if *author == "" && img[0].Author != "" { + *author = strings.Replace(img[0].Author, "\"", "", -1) + } + // If no description has been set, use the comment from the image + if *description == "" && img[0].Comment != "" { + *description = strings.Replace(img[0].Comment, "\"", "", -1) + } + } + } + + // Old version: write the resulting script to a file + // f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755) + // if err != nil { + // panic(err) + // } + // defer f.Close() + + // Create a map with needed data + data := map[string]string{ + "author": *author, + "description": *description, + "container_id": flag.Arg(0), + } + + // Process the template and output it on Stdout + if err := tpl.Execute(os.Stdout, data); err != nil { + panic(err) + } +} diff --git a/contrib/host-integration/manager.sh b/contrib/host-integration/manager.sh new file mode 100755 index 00000000..8ea296f5 --- /dev/null +++ b/contrib/host-integration/manager.sh @@ -0,0 +1,53 @@ +#!/bin/sh +set -e + +usage() { + echo >&2 "usage: $0 [-a author] [-d description] container [manager]" + echo >&2 " ie: $0 -a 'John Smith' 4ec9612a37cd systemd" + echo >&2 " ie: $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart" + exit 1 +} + +auth='' +desc='' +have_auth= +have_desc= +while getopts a:d: opt; do + case "$opt" in + a) + auth="$OPTARG" + have_auth=1 + ;; + d) + desc="$OPTARG" + have_desc=1 + ;; + esac +done +shift $(($OPTIND - 1)) + +[ $# -ge 1 -a $# -le 2 ] || usage + +cid="$1" +script="${2:-upstart}" +if [ ! -e "manager/$script" ]; then + echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)." 
+ echo >&2 'The currently supported types are:' + echo >&2 " $(cd manager && echo *)" + exit 1 +fi + +# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting) +#if command -v docker > /dev/null 2>&1; then +# image="$(docker inspect -f '{{.Image}}' "$cid")" +# if [ "$image" ]; then +# if [ -z "$have_auth" ]; then +# auth="$(docker inspect -f '{{.Author}}' "$image")" +# fi +# if [ -z "$have_desc" ]; then +# desc="$(docker inspect -f '{{.Comment}}' "$image")" +# fi +# fi +#fi + +exec "manager/$script" "$cid" "$auth" "$desc" diff --git a/contrib/host-integration/manager/systemd b/contrib/host-integration/manager/systemd new file mode 100755 index 00000000..0431b3ce --- /dev/null +++ b/contrib/host-integration/manager/systemd @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +cid="$1" +auth="$2" +desc="$3" + +cat <<-EOF + [Unit] + Description=$desc + Author=$auth + After=docker.service + + [Service] + ExecStart=/usr/bin/docker start -a $cid + ExecStop=/usr/bin/docker stop -t 2 $cid + + [Install] + WantedBy=multi-user.target +EOF diff --git a/contrib/host-integration/manager/upstart b/contrib/host-integration/manager/upstart new file mode 100755 index 00000000..af90f1fd --- /dev/null +++ b/contrib/host-integration/manager/upstart @@ -0,0 +1,15 @@ +#!/bin/sh +set -e + +cid="$1" +auth="$2" +desc="$3" + +cat <<-EOF + description "$(echo "$desc" | sed 's/"/\\"/g')" + author "$(echo "$auth" | sed 's/"/\\"/g')" + start on filesystem and started lxc-net and started docker + stop on runlevel [!2345] + respawn + exec /usr/bin/docker start -a "$cid" +EOF diff --git a/contrib/init/openrc/docker.confd b/contrib/init/openrc/docker.confd new file mode 100644 index 00000000..ae247c00 --- /dev/null +++ b/contrib/init/openrc/docker.confd @@ -0,0 +1,13 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +#DOCKER_LOGFILE="/var/log/docker.log" + +# where docker's pid gets stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKER_BINARY="/usr/bin/docker" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd new file mode 100755 index 00000000..a9d21b17 --- /dev/null +++ b/contrib/init/openrc/docker.initd @@ -0,0 +1,34 @@ +#!/sbin/runscript +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log} +DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid} +DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker} +DOCKER_OPTS=${DOCKER_OPTS:-} + +start() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + ulimit -u 1048576 + + ebegin "Starting docker daemon" + start-stop-daemon --start --background \ + --exec "$DOCKER_BINARY" \ + --pidfile "$DOCKER_PIDFILE" \ + --stdout "$DOCKER_LOGFILE" \ + --stderr "$DOCKER_LOGFILE" \ + -- -d -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS + eend $? +} + +stop() { + ebegin "Stopping docker daemon" + start-stop-daemon --stop \ + --exec "$DOCKER_BINARY" \ + --pidfile "$DOCKER_PIDFILE" + eend $? 
+} diff --git a/contrib/init/systemd/MAINTAINERS b/contrib/init/systemd/MAINTAINERS new file mode 100644 index 00000000..760a76d6 --- /dev/null +++ b/contrib/init/systemd/MAINTAINERS @@ -0,0 +1,2 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service new file mode 100644 index 00000000..83c810d1 --- /dev/null +++ b/contrib/init/systemd/docker.service @@ -0,0 +1,13 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=http://docs.docker.com +After=network.target docker.socket +Requires=docker.socket + +[Service] +ExecStart=/usr/bin/docker -d -H fd:// +LimitNOFILE=1048576 +LimitNPROC=1048576 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/init/systemd/docker.socket b/contrib/init/systemd/docker.socket new file mode 100644 index 00000000..7dd95098 --- /dev/null +++ b/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker new file mode 100755 index 00000000..cf33c837 --- /dev/null +++ b/contrib/init/sysvinit-debian/docker @@ -0,0 +1,141 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=$(basename $0) + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKER=/usr/bin/$BASE +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) +if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 +fi + +# Check docker is present +if [ ! -x $DOCKER ]; then + log_failure_msg "$DOCKER not present or not executable" + exit 1 +fi + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! 
mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + if [ "$BASH" ]; then + ulimit -u 1048576 + else + ulimit -p 1048576 + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKER" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -d -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" + log_end_msg $? + ;; + + restart) + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + fail_unless_root + $0 restart + ;; + + status) + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" + ;; + + *) + echo "Usage: $0 {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 00000000..14e66017 --- /dev/null +++ b/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,13 @@ +# Docker Upstart and SysVinit configuration file + +# Customize location of Docker binary (especially for development testing). +#DOCKER="/usr/local/bin/docker" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker new file mode 100755 index 00000000..eadf02c7 --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,130 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +exec="/usr/bin/$prog" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + [ -x $exec ] || exit 5 + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + printf '\n%s\n' "$(date)" >> $logfile + $exec -d $other_args &>> $logfile & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! 
-f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + done + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/contrib/init/sysvinit-redhat/docker.sysconfig b/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 00000000..9c99dd19 --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker -d + +other_args="" diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf new file mode 100644 index 00000000..5a3f8888 --- /dev/null +++ b/contrib/init/upstart/docker.conf @@ -0,0 +1,41 @@ +description "Docker daemon" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 + +respawn + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKER=/usr/bin/$UPSTART_JOB + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . 
/etc/default/$UPSTART_JOB + fi + exec "$DOCKER" -d $DOCKER_OPTS +end script diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh new file mode 100755 index 00000000..b9869ae6 --- /dev/null +++ b/contrib/mkimage-alpine.sh @@ -0,0 +1,82 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $REPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $REPO > $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz +} + +while getopts "hr:m:s" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +REPO=$MIRROR/$REL/main +ARCH=$(uname -m) + +tmp +getapk +mkbase +conf +pack +save diff --git a/contrib/mkimage-arch-pacman.conf b/contrib/mkimage-arch-pacman.conf new file mode 100644 index 00000000..45fe03dc --- /dev/null +++ b/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. 
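+# A one-time key setup on the build host would therefore look like this +# (sketch; run as root before the first pacstrap): +# pacman-key --init +# pacman-key --populate archlinux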
 + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh new file mode 100755 index 00000000..e83b2b67 --- /dev/null +++ b/contrib/mkimage-arch.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs + +expect <<EOF +set send_slow {1 .1} +proc send {ignore arg} { + sleep .1 + exp_send -s -- \$arg +} +set timeout 60 +spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE +expect { + -exact "anyway? \[Y/n\] " { send -- "n\r"; exp_continue } + -exact "(default=all): " { send -- "\r"; exp_continue } + -exact "installation? \[Y/n\]" { send -- "y\r"; exp_continue } +} +EOF + +arch-chroot $ROOTFS /bin/sh -c 'haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux' +arch-chroot $ROOTFS /bin/sh -c 'ln -s /usr/share/zoneinfo/UTC /etc/localtime' +echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo "Server = https://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux +docker run -i -t archlinux echo Success. +rm -rf $ROOTFS diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh new file mode 100755 index 00000000..b11a6bb2 --- /dev/null +++ b/contrib/mkimage-busybox.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "busybox". + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || { + echo "Sorry, I could not locate busybox." + echo "Try 'apt-get install busybox-static'?" 
+ exit 1 +} + +set -e +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM +mkdir $ROOTFS +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo root:x:0: > etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +tar --numeric-owner -cf- . | docker import - busybox +docker run -i -u root busybox /bin/echo Success. diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh new file mode 100755 index 00000000..3f0bdcae --- /dev/null +++ b/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. 
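 + +# Optional sanity check (sketch): the import above should have produced +# both the version tag and "latest". +# docker images crux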
+ +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh new file mode 100755 index 00000000..d9d6aae6 --- /dev/null +++ b/contrib/mkimage-debootstrap.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + +variant='minbase' +include='iproute,iputils-ping' +arch='amd64' # intentionally undocumented for now +skipDetection= +strictDebootstrap= +justTar= + +usage() { + echo >&2 + + echo >&2 "usage: $0 [options] repo suite [mirror]" + + echo >&2 + echo >&2 'options: (not recommended)' + echo >&2 " -p set an http_proxy for debootstrap" + echo >&2 " -v $variant # change default debootstrap variant" + echo >&2 " -i $include # change default package includes" + echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" + echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" + echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" + echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" + + echo >&2 + echo >&2 " ie: $0 username/debian squeeze" + echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" + + echo >&2 + echo >&2 " ie: $0 username/ubuntu precise" + echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" + + echo >&2 + echo >&2 " ie: $0 -t precise.tar.bz2 precise" + echo >&2 " $0 -t wheezy.tgz wheezy" + echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" + + echo >&2 +} + +# these should match the names found at http://www.debian.org/releases/ +debianStable=wheezy +debianUnstable=sid +# this should match the name found at http://releases.ubuntu.com/ +ubuntuLatestLTS=trusty +# this should match the name found at http://releases.tanglu.org/ +tangluLatest=aequorea + +while getopts v:i:a:p:dst name; do + case "$name" in + p) + http_proxy="$OPTARG" + ;; + v) + variant="$OPTARG" + ;; + i) + include="$OPTARG" + ;; + a) + arch="$OPTARG" + ;; + d) + strictDebootstrap=1 + ;; + s) + skipDetection=1 + ;; + t) + justTar=1 + ;; + ?) + usage + exit 0 + ;; + esac +done +shift $(($OPTIND - 1)) + +repo="$1" +suite="$2" +mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided + +if [ ! "$repo" ] || [ ! "$suite" ]; then + usage + exit 1 +fi + +# some rudimentary detection for whether we need to "sudo" our docker calls +docker='' +if docker version > /dev/null 2>&1; then + docker='docker' +elif sudo docker version > /dev/null 2>&1; then + docker='sudo docker' +elif command -v docker > /dev/null 2>&1; then + docker='docker' +else + echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" + echo >&2 " this script is not likely to work as expected" + sleep 3 + docker='docker' # give us a command-not-found later +fi + +# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory +if [ "$justTar" ]; then + if [ ! 
-d "$(dirname "$repo")" ]; then + echo >&2 "error: $(dirname "$repo") does not exist" + exit 1 + fi + repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" +fi + +# will be filled in later, if [ -z "$skipDetection" ] +lsbDist='' + +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + +set -x + +# bootstrap +mkdir -p "$target" +sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" + +cd "$target" + +if [ -z "$strictDebootstrap" ]; then + # prevent init scripts from running during install/update + # policy-rc.d (for most scripts) + echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null + sudo chmod +x usr/sbin/policy-rc.d + # initctl (for some pesky upstart scripts) + sudo chroot . dpkg-divert --local --rename --add /sbin/initctl + sudo ln -sf /bin/true sbin/initctl + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 + + # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) + sudo chroot . apt-get clean + + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + { + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo "DPkg::Post-Invoke { ${aptGetClean} };" + echo "APT::Update::Post-Invoke { ${aptGetClean} };" + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' + } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null + + # and remove the translations, too + echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null + + # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): + # rm /usr/sbin/policy-rc.d + # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl + # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup + # rm /etc/apt/apt.conf.d/no-cache + # rm /etc/apt/apt.conf.d/no-languages + + if [ -z "$skipDetection" ]; then + # see also rudimentary platform detection in hack/install.sh + lsbDist='' + if [ -r etc/lsb-release ]; then + lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then + lsbDist='Debian' + fi + + case "$lsbDist" in + Debian) + # add the updates and security repositories + if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then + # ${suite}-updates only applies to non-unstable + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + + # same for security updates + echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null + fi + ;; + Ubuntu) + # add the universe, updates, and security repositories + sudo sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " etc/apt/sources.list + ;; + Tanglu) + # add the updates repository + if [ "$suite" = "$tangluLatest" ]; then + # ${suite}-updates only applies to stable Tanglu versions + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + fi + ;; + SteamOS) + # add contrib and non-free + sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list + ;; + esac + fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y +fi + +if [ "$justTar" ]; then + # create the tarball file so it has the right permissions (ie, not root) + touch "$repo" + + # fill the tarball + sudo tar --numeric-owner -caf "$repo" . +else + # create the image (and tag $repo:$suite) + sudo tar --numeric-owner -c . | $docker import - $repo:$suite + + # test the image + $docker run -i -t $repo:$suite echo success + + if [ -z "$skipDetection" ]; then + case "$lsbDist" in + Debian) + if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + + if [ -r etc/debian_version ]; then + # tag the specific debian release version (which is only reasonable to tag on debian stable) + ver=$(cat etc/debian_version) + $docker tag $repo:$suite $repo:$ver + fi + fi + ;; + Ubuntu) + if [ "$suite" = "$ubuntuLatestLTS" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Ubuntu version number, if available (12.04, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + Tanglu) + if [ "$suite" = "$tangluLatest" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Tanglu version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + SteamOS) + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific SteamOS version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + esac + fi +fi + +# cleanup +cd "$returnTo" +sudo rm -rf "$target" diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh new file mode 100755 index 00000000..69a8bc8f --- /dev/null +++ b/contrib/mkimage-rinse.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). 
See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + +repo="$1" +distro="$2" +mirror="$3" + +if [ ! "$repo" ] || [ ! "$distro" ]; then + self="$(basename $0)" + echo >&2 "usage: $self repo distro [mirror]" + echo >&2 + echo >&2 " ie: $self username/centos centos-5" + echo >&2 " $self username/centos centos-6" + echo >&2 + echo >&2 " ie: $self username/slc slc-5" + echo >&2 " $self username/slc slc-6" + echo >&2 + echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" + echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" + echo >&2 + echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' + echo >&2 ' expected values of "mirror".' + echo >&2 + echo >&2 'This script is tested to work with the original upstream version of rinse,' + echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' + echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' + echo >&2 + exit 1 +fi + +target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) +if [ "$mirror" ]; then + rinseArgs+=( --mirror "$mirror" ) +fi + +set -x + +mkdir -p "$target" + +sudo rinse "${rinseArgs[@]}" + +cd "$target" + +# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own +sudo rm -rf dev +sudo mkdir -m 755 dev +( + cd dev + sudo ln -sf /proc/self/fd ./ + sudo mkdir -m 755 pts + sudo mkdir -m 1777 shm + sudo mknod -m 600 console c 5 1 + sudo mknod -m 600 initctl p + sudo mknod -m 666 full c 1 7 + sudo mknod -m 666 null c 1 3 + sudo mknod -m 666 ptmx c 5 2 + sudo mknod -m 666 random c 1 8 + sudo mknod -m 666 tty c 5 0 + sudo mknod -m 666 tty0 c 4 0 + sudo mknod -m 666 urandom c 1 9 + sudo mknod -m 666 zero c 1 5 +) + +# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" +# locales +sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} +# docs +sudo rm -rf usr/share/{man,doc,info,gnome/help} +# cracklib +sudo rm -rf usr/share/cracklib +# i18n +sudo rm -rf usr/share/i18n +# yum cache +sudo rm -rf var/cache/yum +sudo mkdir -p --mode=0755 var/cache/yum +# sln +sudo rm -rf sbin/sln +# ldconfig +#sudo rm -rf sbin/ldconfig +sudo rm -rf etc/ld.so.cache var/cache/ldconfig +sudo mkdir -p --mode=0755 var/cache/ldconfig + +# allow networking init scripts inside the container to work without extra steps +echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null + +# to restore locales later: +# yum reinstall glibc-common + +version= +if [ -r etc/redhat-release ]; then + version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" +elif [ -r etc/SuSE-release ]; then + version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" +fi + +if [ -z "$version" ]; then + echo >&2 "warning: cannot autodetect OS version, using $distro as tag" + sleep 20 + version="$distro" +fi + +sudo tar --numeric-owner -c . 
| docker import - $repo:$version + +docker run -i -t $repo:$version echo success + +cd "$returnTo" +sudo rm -rf "$target" diff --git a/contrib/mkimage-unittest.sh b/contrib/mkimage-unittest.sh new file mode 100755 index 00000000..feebb17b --- /dev/null +++ b/contrib/mkimage-unittest.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "docker-ut". + +missing_pkg() { + echo "Sorry, I could not locate $1" + echo "Try 'apt-get install ${2:-$1}'?" + exit 1 +} + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || missing_pkg busybox busybox-static +SOCAT=$(which socat) +[ "$SOCAT" ] || missing_pkg socat + +shopt -s extglob +set -ex +ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX` +trap "rm -rf $ROOTFS" INT QUIT TERM +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd +echo root:x:0: > etc/group +echo daemon:x:1: >> etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX $SOCAT bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +chmod 0755 $ROOTFS # See #486 +tar --numeric-owner -cf- . | docker import - docker-ut +docker run -i -u root docker-ut /bin/echo Success. +rm -rf $ROOTFS diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh new file mode 100755 index 00000000..f21a63a2 --- /dev/null +++ b/contrib/mkimage-yum.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +usage() { + cat <<EOOPTS +Usage: $(basename $0) <name> +OPTIONS: + -y The path to the yum config to install packages from. The + default is /etc/yum.conf. +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +while getopts ":y:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +#-------------------- + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall Core +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <<EOF +NETWORKING=yes +HOSTNAME=localhost.localdomain +EOF + +# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" +rm -rf "$target"/usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} +rm -rf "$target"/usr/share/{man,doc,info,gnome/help} +rm -rf "$target"/usr/share/cracklib +rm -rf "$target"/usr/share/i18n +rm -rf "$target"/var/cache/yum +mkdir -p --mode=0755 "$target"/var/cache/yum +rm -rf "$target"/sbin/sln +rm -rf "$target"/etc/ld.so.cache "$target"/var/cache/ldconfig +mkdir -p --mode=0755 "$target"/var/cache/ldconfig + +version= +if [ -r "$target"/etc/redhat-release ]; then + version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' "$target"/etc/redhat-release)" +fi + +if [ -z "$version" ]; then + echo >&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . 
| docker import - $name:$version +docker run -i -t $name:$version echo success + +rm -rf "$target" diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh new file mode 100755 index 00000000..cd2fa748 --- /dev/null +++ b/contrib/mkimage.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ ! -x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar.xz" +touch "$tarFile" + +( + set -x + tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' . 
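+ # (--transform strips the leading "./" from member names so the rootfs + # unpacks at the archive root when the generated Dockerfile ADDs it) +)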
+) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <<'EOF' +FROM scratch +ADD rootfs.tar.xz / +EOF + +# if our generated image has a decent shell, let's set a default command +for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do + if [ -x "$rootfsDir/$shell" ]; then + ( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 00000000..8a71f5ed --- /dev/null +++ b/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/contrib/mkimage/busybox-static b/contrib/mkimage/busybox-static new file mode 100755 index 00000000..e15322b4 --- /dev/null +++ b/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap new file mode 100755 index 00000000..fcda4978 --- /dev/null +++ b/contrib/mkimage/debootstrap @@ -0,0 +1,193 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +( + set -x + debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF' +#!/bin/sh + +# For most Docker users, "apt-get install" only happens during "docker build", +# where starting services doesn't work and often fails in humorous ways. 
This +# prevents those failures by stopping the services from attempting to start. + +exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; chroot "$rootfsDir" apt-get clean ) + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. 
+ + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + # LTS + if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then + head -1 "$rootfsDir/etc/apt/sources.list" \ + | sed "s/ $suite / squeeze-lts /" \ + >> "$rootfsDir/etc/apt/sources.list" + fi + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + chroot "$rootfsDir" bash -c 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff --git a/contrib/mkimage/mageia-urpmi b/contrib/mkimage/mageia-urpmi new file mode 100755 index 00000000..93fb289c --- /dev/null +++ b/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). 
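+# (That is: the build host should be running Mageia 4 or newer, while the
+# images this script produces have been tested for Mageia 2 onwards.)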
+#
+set -e
+
+rootfsDir="$1"
+shift
+
+optTemp=$(getopt --options '+v:m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@")
+eval set -- "$optTemp"
+unset optTemp
+
+installversion=
+mirror=
+while true; do
+	case "$1" in
+		-v|--version) installversion="$2" ; shift 2 ;;
+		-m|--mirror) mirror="$2" ; shift 2 ;;
+		--) shift ; break ;;
+	esac
+done
+
+if [ -z "$installversion" ]; then
+	# Attempt to match host version
+	if [ -r /etc/mageia-release ]; then
+		installversion="$(sed 's/^[^0-9]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)"
+	else
+		echo "Error: no version supplied and unable to detect host Mageia version"
+		exit 1
+	fi
+fi
+
+if [ -z "$mirror" ]; then
+	# No mirror provided, default to mirrorlist
+	mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list"
+fi
+
+(
+	set -x
+	# $mirror is left unquoted on purpose: it expands to an option plus its value
+	urpmi.addmedia --distrib \
+		$mirror \
+		--urpmi-root "$rootfsDir"
+	urpmi basesystem-minimal urpmi \
+		--auto \
+		--no-suggests \
+		--urpmi-root "$rootfsDir" \
+		--root "$rootfsDir"
+)
+
+"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
+
+if [ -d "$rootfsDir/etc/sysconfig" ]; then
+	# allow networking init scripts inside the container to work without extra steps
+	echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
+fi
diff --git a/contrib/mkimage/rinse b/contrib/mkimage/rinse
new file mode 100755
index 00000000..75eb4f0d
--- /dev/null
+++ b/contrib/mkimage/rinse
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+set -e
+
+rootfsDir="$1"
+shift
+
+# specifying --arch below is safe because "$@" can override it and the "latest" one wins :)
+
+(
+	set -x
+	rinse --directory "$rootfsDir" --arch amd64 "$@"
+)
+
+"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
+
+if [ -d "$rootfsDir/etc/sysconfig" ]; then
+	# allow networking init scripts inside the container to work without extra steps
+	echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
+fi
+
+# make sure we're fully up-to-date, too
+(
+	set -x
+	chroot "$rootfsDir" yum update -y
+)
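All the helpers under `contrib/mkimage/` share one calling convention: `mkimage.sh` runs them as `<helper> <rootfsDir> [helper-args...]` and expects the rootfs directory to be populated when they return. A minimal sketch of that protocol (using a hypothetical helper named `tarball`, which is not part of this tree):

```bash
#!/usr/bin/env bash
# contrib/mkimage/tarball (hypothetical illustration, not part of this import)
set -e

rootfsDir="$1" # mkimage.sh always passes the rootfs directory first
shift

tarball="$1" # any remaining arguments are helper-specific
shift

# populate the rootfs from a pre-built tarball
tar -xf "$tarball" -C "$rootfsDir"
```

It would then be invoked as `./mkimage.sh -t someuser/custom tarball /path/to/rootfs.tar`.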
diff --git a/contrib/mkseccomp.pl b/contrib/mkseccomp.pl
new file mode 100755
index 00000000..28d0645a
--- /dev/null
+++ b/contrib/mkseccomp.pl
@@ -0,0 +1,77 @@
+#!/usr/bin/perl
+#
+# A simple helper script to help people build seccomp profiles for
+# Docker/LXC. The goal is mostly to reduce the attack surface to the
+# kernel by restricting access to rarely used, recently added or unused
+# syscalls.
+#
+# This script processes one or more files which contain the list of system
+# calls to be allowed. See mkseccomp.sample for more information on how you
+# can configure the list of syscalls. When run, this script produces output
+# which, when stored in a file, can be passed to docker as follows:
+#
+# docker run --lxc-conf="lxc.seccomp=$file"
+#
+# The included sample file shows how to cut about a quarter of all syscalls
+# without affecting most applications.
+#
+# For specific situations it is possible to reduce the list further. By
+# reducing the list to just those syscalls required by a certain application
+# you can make it difficult for unknown/unexpected code to run.
+#
+# Run this script as follows:
+#
+# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
+# or
+# ./mkseccomp.pl mkseccomp.sample >syscalls.list
+#
+# Multiple files can be specified, in which case the lists of syscalls are
+# combined.
+#
+# By Martijn van Oosterhout Nov 2013
+
+# How it works:
+#
+# This program basically spawns two processes to form a chain like:
+#
+# (generate "__NR_<name>" lines from the input) | cpp | (keep numeric lines)
+
+use strict;
+use warnings;
+
+if( -t ) {
+    print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
+    print STDERR "Usage: mkseccomp.pl < file [files...]\n";
+    exit 1;
+}
+
+my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";
+
+if($pid == 0) { # Child
+    $pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";
+
+    if($pid == 0) { # Child, which execs cpp
+        exec "cpp" or die "Couldn't exec cpp ($!)\n";
+        exit 1;
+    }
+
+    # Feed the syscall names to cpp, which resolves them to numbers via the
+    # __NR_* macros
+    print $out "#include <sys/syscall.h>\n";
+    while(<>) {
+        if(/^\w/) {
+            print $out "__NR_$_";
+        }
+    }
+    close $out;
+    exit 0;
+
+}
+
+# Print header and then process output from cpp.
+print "1\n";
+print "whitelist\n";
+
+while(<$in>) {
+    print if( /^[0-9]/ );
+}
+
diff --git a/contrib/mkseccomp.sample b/contrib/mkseccomp.sample
new file mode 100644
index 00000000..7a0c8d19
--- /dev/null
+++ b/contrib/mkseccomp.sample
@@ -0,0 +1,444 @@
+/* This sample file is an example for mkseccomp.pl to produce a seccomp file
+ * which restricts syscalls that are only useful for an admin but allows the
+ * vast majority of normal userspace programs to run normally.
+ *
+ * The format of this file is one line per syscall. This is then processed
+ * and passed to 'cpp' to convert the names to numbers using whatever is
+ * correct for your platform. As such C-style comments are permitted. Note
+ * this also means that C preprocessor macros are also allowed. So it is
+ * possible to create groups surrounded by #ifdef/#endif and control their
+ * inclusion via #define (not #include).
+ *
+ * Syscalls that don't exist on your architecture are silently filtered out.
+ * Syscalls marked with (*) are required for a container to spawn a bash
+ * shell successfully (not necessarily full featured). Listing the same
+ * syscall multiple times is no problem.
+ *
+ * If you want to make a list specifically for one application the easiest
+ * way is to run the application under strace, like so:
+ *
+ * $ strace -f -q -c -o strace.out application args...
+ *
+ * Once you have a reasonable sample of the execution of the program, exit
+ * it. The file strace.out will have a summary of the syscalls used. Copy
+ * that list into this file, comment out everything else except the starred
+ * syscalls (which you need for the container to start) and you're done.
+ *
+ * To get the list of syscalls from the strace output, this works well for
+ * me:
+ *
+ * $ cut -c52- < strace.out
+ *
+ * This sample list was compiled as a combination of all the syscalls
+ * available on i386 and amd64 on Ubuntu Precise; as such it may not contain
+ * everything, and not everything may be relevant for your system. This
+ * shouldn't be a problem.
+ */ + +// Filesystem/File descriptor related +access // (*) +chdir // (*) +chmod +chown +chown32 +close // (*) +creat +dup // (*) +dup2 // (*) +dup3 +epoll_create +epoll_create1 +epoll_ctl +epoll_ctl_old +epoll_pwait +epoll_wait +epoll_wait_old +eventfd +eventfd2 +faccessat // (*) +fadvise64 +fadvise64_64 +fallocate +fanotify_init +fanotify_mark +ioctl // (*) +fchdir +fchmod +fchmodat +fchown +fchown32 +fchownat +fcntl // (*) +fcntl64 +fdatasync +fgetxattr +flistxattr +flock +fremovexattr +fsetxattr +fstat // (*) +fstat64 +fstatat64 +fstatfs +fstatfs64 +fsync +ftruncate +ftruncate64 +getcwd // (*) +getdents // (*) +getdents64 +getxattr +inotify_add_watch +inotify_init +inotify_init1 +inotify_rm_watch +io_cancel +io_destroy +io_getevents +io_setup +io_submit +lchown +lchown32 +lgetxattr +link +linkat +listxattr +llistxattr +llseek +_llseek +lremovexattr +lseek // (*) +lsetxattr +lstat +lstat64 +mkdir +mkdirat +mknod +mknodat +newfstatat +_newselect +oldfstat +oldlstat +oldolduname +oldstat +olduname +oldwait4 +open // (*) +openat // (*) +pipe // (*) +pipe2 +poll +ppoll +pread64 +preadv +futimesat +pselect6 +pwrite64 +pwritev +read // (*) +readahead +readdir +readlink +readlinkat +readv +removexattr +rename +renameat +rmdir +select +sendfile +sendfile64 +setxattr +splice +stat // (*) +stat64 +statfs // (*) +statfs64 +symlink +symlinkat +sync +sync_file_range +sync_file_range2 +syncfs +tee +truncate +truncate64 +umask +unlink +unlinkat +ustat +utime +utimensat +utimes +write // (*) +writev + +// Network related +accept +accept4 +bind // (*) +connect // (*) +getpeername +getsockname // (*) +getsockopt +listen +recv +recvfrom // (*) +recvmmsg +recvmsg +send +sendmmsg +sendmsg +sendto // (*) +setsockopt +shutdown +socket // (*) +socketcall +socketpair +sethostname // (*) + +// Signal related +pause +rt_sigaction // (*) +rt_sigpending +rt_sigprocmask // (*) +rt_sigqueueinfo +rt_sigreturn // (*) +rt_sigsuspend +rt_sigtimedwait +rt_tgsigqueueinfo +sigaction +sigaltstack // (*) +signal +signalfd +signalfd4 +sigpending +sigprocmask +sigreturn +sigsuspend + +// Other needed POSIX +alarm +brk // (*) +clock_adjtime +clock_getres +clock_gettime +clock_nanosleep +//clock_settime +gettimeofday +nanosleep +nice +sysinfo +syslog +time +timer_create +timer_delete +timerfd_create +timerfd_gettime +timerfd_settime +timer_getoverrun +timer_gettime +timer_settime +times +uname // (*) + +// Memory control +madvise +mbind +mincore +mlock +mlockall +mmap // (*) +mmap2 +mprotect // (*) +mremap +msync +munlock +munlockall +munmap // (*) +remap_file_pages +set_mempolicy +vmsplice + +// Process control +capget +capset // (*) +clone // (*) +execve // (*) +exit // (*) +exit_group // (*) +fork +getcpu +getpgid +getpgrp // (*) +getpid // (*) +getppid // (*) +getpriority +getresgid +getresgid32 +getresuid +getresuid32 +getrlimit // (*) +getrusage +getsid +getuid // (*) +getuid32 +getegid // (*) +getegid32 +geteuid // (*) +geteuid32 +getgid // (*) +getgid32 +getgroups +getgroups32 +getitimer +get_mempolicy +kill +//personality +prctl +prlimit64 +sched_getaffinity +sched_getparam +sched_get_priority_max +sched_get_priority_min +sched_getscheduler +sched_rr_get_interval +//sched_setaffinity +//sched_setparam +//sched_setscheduler +sched_yield +setfsgid +setfsgid32 +setfsuid +setfsuid32 +setgid +setgid32 +setgroups +setgroups32 +setitimer +setpgid // (*) +setpriority +setregid +setregid32 +setresgid +setresgid32 +setresuid +setresuid32 +setreuid +setreuid32 +setrlimit +setsid +setuid +setuid32 +ugetrlimit +vfork +wait4 // (*) 
+waitid +waitpid + +// IPC +ipc +mq_getsetattr +mq_notify +mq_open +mq_timedreceive +mq_timedsend +mq_unlink +msgctl +msgget +msgrcv +msgsnd +semctl +semget +semop +semtimedop +shmat +shmctl +shmdt +shmget + +// Linux specific, mostly needed for thread-related stuff +arch_prctl // (*) +get_robust_list +get_thread_area +gettid +futex // (*) +restart_syscall // (*) +set_robust_list // (*) +set_thread_area +set_tid_address // (*) +tgkill +tkill + +// Admin syscalls, these are blocked +//acct +//adjtimex +//bdflush +//chroot +//create_module +//delete_module +//get_kernel_syms // Obsolete +//idle // Obsolete +//init_module +//ioperm +//iopl +//ioprio_get +//ioprio_set +//kexec_load +//lookup_dcookie // oprofile only? +//migrate_pages // NUMA +//modify_ldt +//mount +//move_pages // NUMA +//name_to_handle_at // NFS server +//nfsservctl // NFS server +//open_by_handle_at // NFS server +//perf_event_open +//pivot_root +//process_vm_readv // For debugger +//process_vm_writev // For debugger +//ptrace // For debugger +//query_module +//quotactl +//reboot +//setdomainname +//setns +//settimeofday +//sgetmask // Obsolete +//ssetmask // Obsolete +//stime +//swapoff +//swapon +//_sysctl +//sysfs +//sys_setaltroot +//umount +//umount2 +//unshare +//uselib +//vhangup +//vm86 +//vm86old + +// Kernel key management +//add_key +//keyctl +//request_key + +// Unimplemented +//afs_syscall +//break +//ftime +//getpmsg +//gtty +//lock +//madvise1 +//mpx +//prof +//profil +//putpmsg +//security +//stty +//tuxcall +//ulimit +//vserver diff --git a/contrib/nuke-graph-directory.sh b/contrib/nuke-graph-directory.sh new file mode 100755 index 00000000..f44c45a1 --- /dev/null +++ b/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) 
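+# (dir_in_dir works by prefix stripping: ${inner#$outer} only differs from
+# $inner when $inner starts with $outer, so it is true for
+# "/var/lib/docker/aufs" inside "/var/lib/docker" and false for "/home")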
+for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
+	mount="$(readlink -f "$mount" || true)"
+	if dir_in_dir "$mount" "$dir"; then
+		( set -x; umount -f "$mount" )
+	fi
+done
+
+# now, let's go destroy individual btrfs subvolumes, if any exist
+if command -v btrfs &> /dev/null; then
+	root="$(df "$dir" | awk 'NR>1 { print $NF }')"
+	for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do
+		subvolDir="$root/$subvol"
+		if dir_in_dir "$subvolDir" "$dir"; then
+			( set -x; btrfs subvolume delete "$subvolDir" )
+		fi
+	done
+fi
+
+# finally, DESTROY ALL THINGS
+( set -x; rm -rf "$dir" )
diff --git a/contrib/syntax/kate/Dockerfile.xml b/contrib/syntax/kate/Dockerfile.xml
new file mode 100644
index 00000000..e5602397
--- /dev/null
+++ b/contrib/syntax/kate/Dockerfile.xml
@@ -0,0 +1,68 @@
+<!-- [Kate syntax-highlighting definition for Dockerfiles: the XML markup of
+     this file did not survive extraction; the recoverable content is the
+     Dockerfile keyword list it highlights: FROM, MAINTAINER, ENV, RUN,
+     ONBUILD, COPY, ADD, VOLUME, EXPOSE, ENTRYPOINT, CMD, WORKDIR, USER] -->
diff --git a/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
new file mode 100644
index 00000000..20f0d04c
--- /dev/null
+++ b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>name</key>
+	<string>Comments</string>
+	<key>scope</key>
+	<string>source.dockerfile</string>
+	<key>settings</key>
+	<dict>
+		<key>shellVariables</key>
+		<array>
+			<dict>
+				<key>name</key>
+				<string>TM_COMMENT_START</string>
+				<key>value</key>
+				<string># </string>
+			</dict>
+		</array>
+	</dict>
+	<key>uuid</key>
+	<string>2B215AC0-A7F3-4090-9FF6-F4842BD56CA7</string>
+</dict>
+</plist>
diff --git a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
new file mode 100644
index 00000000..1d19a3ba
--- /dev/null
+++ b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>name</key>
+	<string>Dockerfile</string>
+	<key>fileTypes</key>
+	<array>
+		<string>Dockerfile</string>
+	</array>
+	<key>patterns</key>
+	<array>
+		<dict>
+			<key>match</key>
+			<string>^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR|COPY)\s</string>
+			<key>captures</key>
+			<dict>
+				<key>0</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.control.dockerfile</string>
+				</dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+		</dict>
+		<dict>
+			<key>match</key>
+			<string>^\s*(ONBUILD\s+)?(CMD|ENTRYPOINT)\s</string>
+			<key>captures</key>
+			<dict>
+				<key>0</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.operator.dockerfile</string>
+				</dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+		</dict>
+		<dict>
+			<key>begin</key>
+			<string>"</string>
+			<key>end</key>
+			<string>"</string>
+			<key>name</key>
+			<string>string.quoted.double.dockerfile</string>
+			<key>patterns</key>
+			<array>
+				<dict>
+					<key>match</key>
+					<string>\\.</string>
+					<key>name</key>
+					<string>constant.character.escaped.dockerfile</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>begin</key>
+			<string>'</string>
+			<key>end</key>
+			<string>'</string>
+			<key>name</key>
+			<string>string.quoted.single.dockerfile</string>
+			<key>patterns</key>
+			<array>
+				<dict>
+					<key>match</key>
+					<string>\\.</string>
+					<key>name</key>
+					<string>constant.character.escaped.dockerfile</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>match</key>
+			<string>^\s*#.*$</string>
+			<key>name</key>
+			<string>comment.block.dockerfile</string>
+		</dict>
+	</array>
+	<key>scopeName</key>
+	<string>source.dockerfile</string>
+	<key>uuid</key>
+	<string>a39d8795-59d2-49af-aa00-fe74ee29576e</string>
+</dict>
+</plist>
diff --git a/contrib/syntax/textmate/Docker.tmbundle/info.plist b/contrib/syntax/textmate/Docker.tmbundle/info.plist
new file mode 100644
index 00000000..239f4b0a
--- /dev/null
+++ b/contrib/syntax/textmate/Docker.tmbundle/info.plist
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>contactEmailRot13</key>
+	<string>germ@andz.com.ar</string>
+	<key>contactName</key>
+	<string>GermanDZ</string>
+	<key>description</key>
+	<string>Helpers for Docker.</string>
+	<key>name</key>
+	<string>Docker</string>
+	<key>uuid</key>
+	<string>8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1</string>
+</dict>
+</plist>
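On OS X, `plutil` can lint the bundle's property lists, which is a quick way to sanity-check them (a usage sketch, assuming the repository root as the working directory and the stock `plutil` that ships with OS X):

```bash
cd contrib/syntax/textmate
plutil -lint Docker.tmbundle/info.plist
plutil -lint Docker.tmbundle/Preferences/Dockerfile.tmPreferences
plutil -lint Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
```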
diff --git a/contrib/syntax/textmate/MAINTAINERS b/contrib/syntax/textmate/MAINTAINERS
new file mode 100644
index 00000000..965743df
--- /dev/null
+++ b/contrib/syntax/textmate/MAINTAINERS
@@ -0,0 +1 @@
+Asbjorn Enge (@asbjornenge)
diff --git a/contrib/syntax/textmate/README.md b/contrib/syntax/textmate/README.md
new file mode 100644
index 00000000..e78b76af
--- /dev/null
+++ b/contrib/syntax/textmate/README.md
@@ -0,0 +1,16 @@
+# Docker.tmbundle
+
+Dockerfile syntax highlighting for TextMate and Sublime Text.
+
+## Install
+
+### Sublime Text
+
+Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting).
+Search for *Dockerfile Syntax Highlighting*.
+
+### TextMate 2
+
+Copy the directory `Docker.tmbundle` (shown as a package in OS X) to `~/Library/Application Support/TextMate/Managed/Bundles`.
+
+Enjoy.
diff --git a/contrib/syntax/vim/LICENSE b/contrib/syntax/vim/LICENSE
new file mode 100644
index 00000000..e67cdabd
--- /dev/null
+++ b/contrib/syntax/vim/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 Honza Pokorny
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/syntax/vim/README.md b/contrib/syntax/vim/README.md
new file mode 100644
index 00000000..b7824661
--- /dev/null
+++ b/contrib/syntax/vim/README.md
@@ -0,0 +1,23 @@
+dockerfile.vim
+==============
+
+Syntax highlighting for Dockerfiles
+
+Installation
+------------
+
+Via pathogen, the usual way...
+
+Features
+--------
+
+The syntax highlighting includes:
+
+* The directives (e.g. `FROM`)
+* Strings
+* Comments
+
+License
+-------
+
+BSD, short and sweet
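A concrete sketch of the pathogen route mentioned above (the bundle directory name `dockerfile` is arbitrary):

```bash
mkdir -p ~/.vim/bundle/dockerfile
cp -r contrib/syntax/vim/{syntax,ftdetect,doc} ~/.vim/bundle/dockerfile/
```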
diff --git a/contrib/syntax/vim/doc/dockerfile.txt b/contrib/syntax/vim/doc/dockerfile.txt
new file mode 100644
index 00000000..37cc7be9
--- /dev/null
+++ b/contrib/syntax/vim/doc/dockerfile.txt
@@ -0,0 +1,18 @@
+*dockerfile.txt*  Syntax highlighting for Dockerfiles
+
+Author: Honza Pokorny
+License: BSD
+
+INSTALLATION                                            *installation*
+
+Drop it on your Pathogen path and you're all set.
+
+FEATURES                                                *features*
+
+The syntax highlighting includes:
+
+* The directives (e.g. FROM)
+* Strings
+* Comments
+
+ vim:tw=78:et:ft=help:norl:
diff --git a/contrib/syntax/vim/ftdetect/dockerfile.vim b/contrib/syntax/vim/ftdetect/dockerfile.vim
new file mode 100644
index 00000000..83281d33
--- /dev/null
+++ b/contrib/syntax/vim/ftdetect/dockerfile.vim
@@ -0,0 +1 @@
+au BufNewFile,BufRead Dockerfile set filetype=dockerfile
diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim
new file mode 100644
index 00000000..2984bec5
--- /dev/null
+++ b/contrib/syntax/vim/syntax/dockerfile.vim
@@ -0,0 +1,23 @@
+" dockerfile.vim - Syntax highlighting for Dockerfiles
+" Maintainer: Honza Pokorny
+" Version: 0.5
+
+
+if exists("b:current_syntax")
+    finish
+endif
+
+let b:current_syntax = "dockerfile"
+
+syntax case ignore
+
+syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR|COPY)\s/
+highlight link dockerfileKeyword Keyword
+
+syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/
+highlight link dockerfileString String
+
+syntax match dockerfileComment "\v^\s*#.*$"
+highlight link dockerfileComment Comment
+
+set commentstring=#\ %s
diff --git a/contrib/udev/80-docker.rules b/contrib/udev/80-docker.rules
new file mode 100644
index 00000000..f934c017
--- /dev/null
+++ b/contrib/udev/80-docker.rules
@@ -0,0 +1,3 @@
+# hide docker's loopback devices from udisks, and thus from user desktops
+SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
diff --git a/contrib/vagrant-docker/README.md b/contrib/vagrant-docker/README.md
new file mode 100644
index 00000000..4ef9c287
--- /dev/null
+++ b/contrib/vagrant-docker/README.md
@@ -0,0 +1,50 @@
+# Vagrant integration
+
+We are currently aware of at least four projects that integrate with
+[Vagrant](http://vagrantup.com/) at different levels. One approach is to use
+Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
+which means you can create containers and pull base images on VMs using Docker's
+CLI; the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
+meaning you can use Vagrant to control Docker containers.
+
+
+### Provisioners
+
+* [Vocker](https://github.com/fgrehm/vocker)
+* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
+
+### Providers
+
+* [docker-provider](https://github.com/fgrehm/docker-provider)
+* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
+
+## Setting up Vagrant-docker with the Remote API
+
+The stock Docker upstart script will not work for this because the daemon listens on `127.0.0.1` inside the VM, which is not reachable from the host machine. Instead, we need to change the script so the daemon listens on `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
+
+```
+description "Docker daemon"
+
+start on filesystem and started lxc-net
+stop on runlevel [!2345]
+
+respawn
+
+script
+	/usr/bin/docker -d -H=tcp://0.0.0.0:2375
+end script
+```
+
+Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker.
This can be done by running the following command in a host terminal: + +``` +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost +``` + +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/daemon/MAINTAINERS b/daemon/MAINTAINERS new file mode 100644 index 00000000..434aad9d --- /dev/null +++ b/daemon/MAINTAINERS @@ -0,0 +1,6 @@ +Solomon Hykes (@shykes) +Victor Vieux (@vieux) +Michael Crosby (@crosbymichael) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) +volumes.go: Brian Goff (@cpuguy83) diff --git a/daemon/README.md b/daemon/README.md new file mode 100644 index 00000000..64bfcb55 --- /dev/null +++ b/daemon/README.md @@ -0,0 +1,10 @@ +This directory contains code pertaining to running containers and storing images + +Code pertaining to running containers: + + - execdriver + - networkdriver + +Code pertaining to storing images: + + - graphdriver diff --git a/daemon/attach.go b/daemon/attach.go new file mode 100644 index 00000000..7ccaadf4 --- /dev/null +++ b/daemon/attach.go @@ -0,0 +1,273 @@ +package daemon + +import ( + "encoding/json" + "io" + "os" + "time" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + logs = job.GetenvBool("logs") + stream = job.GetenvBool("stream") + stdin = job.GetenvBool("stdin") + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + ) + + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + //logs + if logs { + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + log.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + log.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + log.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + log.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + log.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + log.Errorf("Error reading logs (json): %s", err) + } else { + dec := json.NewDecoder(cLog) + for { + l := &jsonlog.JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + log.Errorf("Error streaming logs: %s", err) + break + } + if l.Stream == "stdout" && stdout { + io.WriteString(job.Stdout, l.Log) + } + if l.Stream == "stderr" && stderr { + io.WriteString(job.Stderr, l.Log) + } + } + } + } + + //stream + if stream { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + cStdinCloser io.Closer + ) + + if stdin { + r, 
w := io.Pipe() + go func() { + defer w.Close() + defer log.Debugf("Closing buffered stdin pipe") + io.Copy(w, job.Stdin) + }() + cStdin = r + cStdinCloser = job.Stdin + } + if stdout { + cStdout = job.Stdout + } + if stderr { + cStderr = job.Stderr + } + + <-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.WaitStop(-1 * time.Second) + } + } + return engine.StatusOK +} + +// FIXME: this should be private, and every outside subsystem +// should go through the "container_attach" job. But that would require +// that job to be properly documented, as well as the relationship between +// Attach and ContainerAttach. +// +// This method is in use by builder/builder.go. +func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { + var ( + cStdout, cStderr io.ReadCloser + nJobs int + errors = make(chan error, 3) + ) + + // Connect stdin of container to the http conn. + if stdin != nil && openStdin { + nJobs++ + // Get the stdin pipe. + if cStdin, err := streamConfig.StdinPipe(); err != nil { + errors <- err + } else { + go func() { + log.Debugf("attach: stdin: begin") + defer log.Debugf("attach: stdin: end") + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if stdinOnce && !tty { + defer cStdin.Close() + } else { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + } + if tty { + _, err = utils.CopyEscapable(cStdin, stdin) + } else { + _, err = io.Copy(cStdin, stdin) + + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + log.Errorf("attach: stdin: %s", err) + } + errors <- err + }() + } + } + if stdout != nil { + nJobs++ + // Get a reader end of a pipe that is attached as stdout to the container. + if p, err := streamConfig.StdoutPipe(); err != nil { + errors <- err + } else { + cStdout = p + go func() { + log.Debugf("attach: stdout: begin") + defer log.Debugf("attach: stdout: end") + // If we are in StdinOnce mode, then close stdin + if stdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stdout, cStdout) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + log.Errorf("attach: stdout: %s", err) + } + errors <- err + }() + } + } else { + // Point stdout of container to a no-op writer. + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + if cStdout, err := streamConfig.StdoutPipe(); err != nil { + log.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&ioutils.NopWriter{}, cStdout) + } + }() + } + if stderr != nil { + nJobs++ + if p, err := streamConfig.StderrPipe(); err != nil { + errors <- err + } else { + cStderr = p + go func() { + log.Debugf("attach: stderr: begin") + defer log.Debugf("attach: stderr: end") + // If we are in StdinOnce mode, then close stdin + // Why are we closing stdin here and above while handling stdout? 
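+				// (Presumably so the caller's stdin is released as soon as
+				// either output stream finishes; see the stdout case above.)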
+ if stdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stderr, cStderr) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + log.Errorf("attach: stderr: %s", err) + } + errors <- err + }() + } + } else { + // Point stderr at a no-op writer. + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + + if cStderr, err := streamConfig.StderrPipe(); err != nil { + log.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&ioutils.NopWriter{}, cStderr) + } + }() + } + + return promise.Go(func() error { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + + // FIXME: how to clean up the stdin goroutine without the unwanted side effect + // of closing the passed stdin? Add an intermediary io.Pipe? + for i := 0; i < nJobs; i++ { + log.Debugf("attach: waiting for job %d/%d", i+1, nJobs) + if err := <-errors; err != nil { + log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) + return err + } + log.Debugf("attach: job %d completed successfully", i+1) + } + log.Debugf("attach: all jobs completed successfully") + return nil + }) +} diff --git a/daemon/changes.go b/daemon/changes.go new file mode 100644 index 00000000..1e5726ed --- /dev/null +++ b/daemon/changes.go @@ -0,0 +1,32 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + outs := engine.NewTable("", 0) + changes, err := container.Changes() + if err != nil { + return job.Error(err) + } + for _, change := range changes { + out := &engine.Env{} + if err := out.Import(change); err != nil { + return job.Error(err) + } + outs.Add(out) + } + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} diff --git a/daemon/commit.go b/daemon/commit.go new file mode 100644 index 00000000..950925ad --- /dev/null +++ b/daemon/commit.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + var ( + config = container.Config + newConfig runconfig.Config + ) + + if err := job.GetenvJson("config", &newConfig); err != nil { + return job.Error(err) + } + + if err := runconfig.Merge(&newConfig, config); err != nil { + return job.Error(err) + } + + img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", img.ID) + return engine.StatusOK +} + +// Commit creates a new filesystem image from the current state of a container. 
+// The image can optionally be tagged into a repository +func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) { + if pause { + container.Pause() + defer container.Unpause() + } + + if err := container.Mount(); err != nil { + return nil, err + } + defer container.Unmount() + + rwTar, err := container.ExportRw() + if err != nil { + return nil, err + } + defer rwTar.Close() + + // Create a new image from the container's base layers + a new layer from container changes + var ( + containerID, containerImage string + containerConfig *runconfig.Config + ) + + if container != nil { + containerID = container.ID + containerImage = container.Image + containerConfig = container.Config + } + + img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) + if err != nil { + return nil, err + } + + // Register the image if needed + if repository != "" { + if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { + return img, err + } + } + return img, nil +} diff --git a/daemon/config.go b/daemon/config.go new file mode 100644 index 00000000..e45e73b9 --- /dev/null +++ b/daemon/config.go @@ -0,0 +1,86 @@ +package daemon + +import ( + "net" + + "github.com/docker/docker/daemon/networkdriver" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +// Config define the configuration of a docker daemon +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +// FIXME: separate runtime configuration from http api configuration +type Config struct { + Pidfile string + Root string + AutoRestart bool + Dns []string + DnsSearch []string + Mirrors []string + EnableIptables bool + EnableIpForward bool + EnableIpMasq bool + DefaultIp net.IP + BridgeIface string + BridgeIP string + FixedCIDR string + InsecureRegistries []string + InterContainerCommunication bool + GraphDriver string + GraphOptions []string + ExecDriver string + Mtu int + DisableNetwork bool + EnableSelinuxSupport bool + Context map[string][]string +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. 
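+// For example, a daemon started as `docker -d --bip=10.0.42.1/24 --dns=8.8.8.8`
+// ends up with config.BridgeIP and config.Dns populated via the flags
+// registered below.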
+func (config *Config) InstallFlags() { + flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") + flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime") + flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules") + flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") + flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading for bridge's IP range") + flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") + flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") + flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") + opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") + flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") + flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") + flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver") + flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available") + opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") + opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options") + // FIXME: why the inconsistency between "hosts" and "sockets"? + opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") + opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") + opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") + + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? 
+ // If so, do not forget to check the TODO in TestIsSecure + config.InsecureRegistries = append(config.InsecureRegistries, "127.0.0.0/8") +} + +func GetDefaultNetworkMtu() int { + if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { + return iface.MTU + } + return defaultNetworkMtu +} diff --git a/daemon/container.go b/daemon/container.go new file mode 100644 index 00000000..6fd45079 --- /dev/null +++ b/daemon/container.go @@ -0,0 +1,1246 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/libcontainer/devices" + "github.com/docker/libcontainer/label" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/links" + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/networkfs/etchosts" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +var ( + ErrNotATTY = errors.New("The PTY is not a file") + ErrNoTTY = errors.New("No PTY found") + ErrContainerStart = errors.New("The container failed to start. Unknown error") + ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") +) + +type StreamConfig struct { + stdout *broadcastwriter.BroadcastWriter + stderr *broadcastwriter.BroadcastWriter + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +type Container struct { + *State `json:"State"` // Needed for remote api version <= 1.11 + root string // Path to the "home" of the container, including metadata. + basefs string // Path to the graphdriver mountpoint + + ID string + + Created time.Time + + Path string + Args []string + + Config *runconfig.Config + Image string + + NetworkSettings *NetworkSettings + + ResolvConfPath string + HostnamePath string + HostsPath string + Name string + Driver string + ExecDriver string + + command *execdriver.Command + StreamConfig + + daemon *Daemon + MountLabel, ProcessLabel string + AppArmorProfile string + RestartCount int + + // Maps container paths to volume paths. The key in this is the path to which + // the volume is being mounted inside the container. Value is the path of the + // volume on disk + Volumes map[string]string + // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. 
+ // Easier than migrating older container configs :) + VolumesRW map[string]bool + hostConfig *runconfig.HostConfig + + activeLinks map[string]*links.Link + monitor *containerMonitor + execCommands *execStore +} + +func (container *Container) FromDisk() error { + pth, err := container.jsonPath() + if err != nil { + return err + } + + data, err := ioutil.ReadFile(pth) + if err != nil { + return err + } + // Load container settings + // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it + if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +func (container *Container) toDisk() error { + data, err := json.Marshal(container) + if err != nil { + return err + } + + pth, err := container.jsonPath() + if err != nil { + return err + } + + err = ioutil.WriteFile(pth, data, 0666) + if err != nil { + return err + } + + return container.WriteHostConfig() +} + +func (container *Container) ToDisk() error { + container.Lock() + err := container.toDisk() + container.Unlock() + return err +} + +func (container *Container) readHostConfig() error { + container.hostConfig = &runconfig.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.hostConfig, + // but that's OK, since we just did that above.) + pth, err := container.hostConfigPath() + if err != nil { + return err + } + + _, err = os.Stat(pth) + if os.IsNotExist(err) { + return nil + } + + data, err := ioutil.ReadFile(pth) + if err != nil { + return err + } + return json.Unmarshal(data, container.hostConfig) +} + +func (container *Container) WriteHostConfig() error { + data, err := json.Marshal(container.hostConfig) + if err != nil { + return err + } + + pth, err := container.hostConfigPath() + if err != nil { + return err + } + + return ioutil.WriteFile(pth, data, 0666) +} + +func (container *Container) LogEvent(action string) { + d := container.daemon + if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil { + log.Errorf("Error logging event %s for %s: %s", action, container.ID, err) + } +} + +func (container *Container) getResourcePath(path string) (string, error) { + cleanPath := filepath.Join("/", path) + return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) +} + +func (container *Container) getRootResourcePath(path string) (string, error) { + cleanPath := filepath.Join("/", path) + return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) +} + +func populateCommand(c *Container, env []string) error { + en := &execdriver.Network{ + Mtu: c.daemon.config.Mtu, + Interface: nil, + } + + parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + case "none": + case "host": + en.HostNetworking = true + case "bridge", "": // empty string to support existing containers + if !c.Config.NetworkDisabled { + network := c.NetworkSettings + en.Interface = &execdriver.NetworkInterface{ + Gateway: network.Gateway, + Bridge: network.Bridge, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + MacAddress: network.MacAddress, + } + } + case "container": + nc, err := c.getNetworkedContainer() + if err != nil { + return err + } + en.ContainerID = nc.ID + default: + return 
fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) + } + + // Build lists of devices allowed and created within the container. + userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices)) + for i, deviceMapping := range c.hostConfig.Devices { + device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) + if err != nil { + return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) + } + device.Path = deviceMapping.PathInContainer + userSpecifiedDevices[i] = device + } + allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...) + + autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...) + + // TODO: this can be removed after lxc-conf is fully deprecated + lxcConfig := mergeLxcConfIntoOptions(c.hostConfig) + + resources := &execdriver.Resources{ + Memory: c.Config.Memory, + MemorySwap: c.Config.MemorySwap, + CpuShares: c.Config.CpuShares, + Cpuset: c.Config.Cpuset, + } + + processConfig := execdriver.ProcessConfig{ + Privileged: c.hostConfig.Privileged, + Entrypoint: c.Path, + Arguments: c.Args, + Tty: c.Config.Tty, + User: c.Config.User, + } + + processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true} + processConfig.Env = env + + c.command = &execdriver.Command{ + ID: c.ID, + Rootfs: c.RootfsPath(), + InitPath: "/.dockerinit", + WorkingDir: c.Config.WorkingDir, + Network: en, + Resources: resources, + AllowedDevices: allowedDevices, + AutoCreatedDevices: autoCreatedDevices, + CapAdd: c.hostConfig.CapAdd, + CapDrop: c.hostConfig.CapDrop, + ProcessConfig: processConfig, + ProcessLabel: c.GetProcessLabel(), + MountLabel: c.GetMountLabel(), + LxcConfig: lxcConfig, + AppArmorProfile: c.AppArmorProfile, + } + + return nil +} + +func (container *Container) Start() (err error) { + container.Lock() + defer container.Unlock() + + if container.Running { + return nil + } + + // if we encounter and error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.cleanup() + } + }() + + if err := container.setupContainerDns(); err != nil { + return err + } + if err := container.Mount(); err != nil { + return err + } + if err := container.initializeNetworking(); err != nil { + return err + } + if err := container.updateParentsHosts(); err != nil { + return err + } + container.verifyDaemonSettings() + if err := container.prepareVolumes(); err != nil { + return err + } + linkedEnv, err := container.setupLinkedContainers() + if err != nil { + return err + } + if err := container.setupWorkingDirectory(); err != nil { + return err + } + env := container.createDaemonEnvironment(linkedEnv) + if err := populateCommand(container, env); err != nil { + return err + } + if err := container.setupMounts(); err != nil { + return err + } + + return container.waitForStart() +} + +func (container *Container) Run() error { + if err := container.Start(); err != nil { + return err + } + container.WaitStop(-1 * time.Second) + return nil +} + +func (container *Container) Output() (output []byte, err error) { + pipe, err := container.StdoutPipe() + if err != nil { + return nil, err + } + defer pipe.Close() + if err := container.Start(); err != nil { + return nil, err + } + output, err = ioutil.ReadAll(pipe) + container.WaitStop(-1 * time.Second) + return output, err +} + +// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard 
input of the container's active process. +// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". + +func (streamConfig *StreamConfig) StdinPipe() (io.WriteCloser, error) { + return streamConfig.stdinPipe, nil +} + +func (streamConfig *StreamConfig) StdoutPipe() (io.ReadCloser, error) { + reader, writer := io.Pipe() + streamConfig.stdout.AddWriter(writer, "") + return ioutils.NewBufReader(reader), nil +} + +func (streamConfig *StreamConfig) StderrPipe() (io.ReadCloser, error) { + reader, writer := io.Pipe() + streamConfig.stderr.AddWriter(writer, "") + return ioutils.NewBufReader(reader), nil +} + +func (streamConfig *StreamConfig) StdoutLogPipe() io.ReadCloser { + reader, writer := io.Pipe() + streamConfig.stdout.AddWriter(writer, "stdout") + return ioutils.NewBufReader(reader) +} + +func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser { + reader, writer := io.Pipe() + streamConfig.stderr.AddWriter(writer, "stderr") + return ioutils.NewBufReader(reader) +} + +func (container *Container) buildHostnameFile() error { + hostnamePath, err := container.getRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + + if container.Config.Domainname != "" { + return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) + } + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +func (container *Container) buildHostsFiles(IP string) error { + + hostsPath, err := container.getRootResourcePath("hosts") + if err != nil { + return err + } + container.HostsPath = hostsPath + + extraContent := make(map[string]string) + + children, err := container.daemon.Children(container.Name) + if err != nil { + return err + } + + for linkAlias, child := range children { + _, alias := path.Split(linkAlias) + extraContent[alias] = child.NetworkSettings.IPAddress + } + + for _, extraHost := range container.hostConfig.ExtraHosts { + parts := strings.Split(extraHost, ":") + extraContent[parts[0]] = parts[1] + } + + return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent) +} + +func (container *Container) buildHostnameAndHostsFiles(IP string) error { + if err := container.buildHostnameFile(); err != nil { + return err + } + + return container.buildHostsFiles(IP) +} + +func (container *Container) AllocateNetwork() error { + mode := container.hostConfig.NetworkMode + if container.Config.NetworkDisabled || !mode.IsPrivate() { + return nil + } + + var ( + env *engine.Env + err error + eng = container.daemon.eng + ) + + job := eng.Job("allocate_interface", container.ID) + if env, err = job.Stdout.AddEnv(); err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + // Error handling: At this point, the interface is allocated so we have to + // make sure that it is always released in case of error, otherwise we + // might leak resources. 
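+	// (Hence every error path below runs the "release_interface" job before
+	// returning, undoing the "allocate_interface" job above.)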
+ + if container.Config.PortSpecs != nil { + if err = migratePortMappings(container.Config, container.hostConfig); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + container.Config.PortSpecs = nil + if err = container.WriteHostConfig(); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + } + + var ( + portSpecs = make(nat.PortSet) + bindings = make(nat.PortMap) + ) + + if container.Config.ExposedPorts != nil { + portSpecs = container.Config.ExposedPorts + } + + if container.hostConfig.PortBindings != nil { + for p, b := range container.hostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIp: bb.HostIp, + HostPort: bb.HostPort, + }) + } + } + } + + container.NetworkSettings.PortMapping = nil + + for port := range portSpecs { + if err = container.allocatePort(eng, port, bindings); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + } + container.WriteHostConfig() + + container.NetworkSettings.Ports = bindings + container.NetworkSettings.Bridge = env.Get("Bridge") + container.NetworkSettings.IPAddress = env.Get("IP") + container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") + container.NetworkSettings.MacAddress = env.Get("MacAddress") + container.NetworkSettings.Gateway = env.Get("Gateway") + + return nil +} + +func (container *Container) ReleaseNetwork() { + if container.Config.NetworkDisabled { + return + } + eng := container.daemon.eng + + eng.Job("release_interface", container.ID).Run() + container.NetworkSettings = &NetworkSettings{} +} + +func (container *Container) isNetworkAllocated() bool { + return container.NetworkSettings.IPAddress != "" +} + +func (container *Container) RestoreNetwork() error { + mode := container.hostConfig.NetworkMode + // Don't attempt a restore if we previously didn't allocate networking. + // This might be a legacy container with no network allocated, in which case the + // allocation will happen once and for all at start. + if !container.isNetworkAllocated() || container.Config.NetworkDisabled || !mode.IsPrivate() { + return nil + } + + eng := container.daemon.eng + + // Re-allocate the interface with the same IP and MAC address. + job := eng.Job("allocate_interface", container.ID) + job.Setenv("RequestedIP", container.NetworkSettings.IPAddress) + job.Setenv("RequestedMac", container.NetworkSettings.MacAddress) + if err := job.Run(); err != nil { + return err + } + + // Re-allocate any previously allocated ports. + for port := range container.NetworkSettings.Ports { + if err := container.allocatePort(eng, port, container.NetworkSettings.Ports); err != nil { + return err + } + } + return nil +} + +// cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. 
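+// cleanup is the function Start defers on error (see above), ensuring a failed start does not leak the network interface, active links, or mounts that were already set up.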
+func (container *Container) cleanup() { + container.ReleaseNetwork() + + // Disable all active links + if container.activeLinks != nil { + for _, link := range container.activeLinks { + link.Disable() + } + } + + if err := container.Unmount(); err != nil { + log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) + } +} + +func (container *Container) KillSig(sig int) error { + log.Debugf("Sending %d to %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + + if !container.Running { + return nil + } + + // signal to the monitor that it should not restart the container + // after we send the kill signal + container.monitor.ExitOnNext() + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on its next event + // loop is enough + if container.Restarting { + return nil + } + + return container.daemon.Kill(container, sig) +} + +func (container *Container) Pause() error { + if container.IsPaused() { + return fmt.Errorf("Container %s is already paused", container.ID) + } + if !container.IsRunning() { + return fmt.Errorf("Container %s is not running", container.ID) + } + return container.daemon.Pause(container) +} + +func (container *Container) Unpause() error { + if !container.IsPaused() { + return fmt.Errorf("Container %s is not paused", container.ID) + } + if !container.IsRunning() { + return fmt.Errorf("Container %s is not running", container.ID) + } + return container.daemon.Unpause(container) +} + +func (container *Container) Kill() error { + if !container.IsRunning() { + return nil + } + + // 1. Send SIGKILL + if err := container.KillSig(9); err != nil { + return err + } + + // 2. Wait for the process to die; as a last resort, try to kill the process directly + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPid(); pid != 0 { + log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + return err + } + } + } + + container.WaitStop(-1 * time.Second) + return nil +} + +func (container *Container) Stop(seconds int) error { + if !container.IsRunning() { + return nil + } + + // 1. Send a SIGTERM + if err := container.KillSig(15); err != nil { + log.Infof("Failed to send SIGTERM to the process, force killing") + if err := container.KillSig(9); err != nil { + return err + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) + // 3.
If it doesn't, then send SIGKILL + if err := container.Kill(); err != nil { + container.WaitStop(-1 * time.Second) + return err + } + } + return nil +} + +func (container *Container) Restart(seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := container.Mount(); err == nil { + defer container.Unmount() + } + + if err := container.Stop(seconds); err != nil { + return err + } + return container.Start() +} + +func (container *Container) Resize(h, w int) error { + return container.command.ProcessConfig.Terminal.Resize(h, w) +} + +func (container *Container) ExportRw() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + if container.daemon == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) + } + archive, err := container.daemon.Diff(container) + if err != nil { + container.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), + nil +} + +func (container *Container) Export() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + + archive, err := archive.Tar(container.basefs, archive.Uncompressed) + if err != nil { + container.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), + nil +} + +func (container *Container) Mount() error { + return container.daemon.Mount(container) +} + +func (container *Container) changes() ([]archive.Change, error) { + return container.daemon.Changes(container) +} + +func (container *Container) Changes() ([]archive.Change, error) { + container.Lock() + defer container.Unlock() + return container.changes() +} + +func (container *Container) GetImage() (*image.Image, error) { + if container.daemon == nil { + return nil, fmt.Errorf("Can't get image of unregistered container") + } + return container.daemon.graph.Get(container.Image) +} + +func (container *Container) Unmount() error { + return container.daemon.Unmount(container) +} + +func (container *Container) logPath(name string) (string, error) { + return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) +} + +func (container *Container) ReadLog(name string) (io.Reader, error) { + pth, err := container.logPath(name) + if err != nil { + return nil, err + } + return os.Open(pth) +} + +func (container *Container) hostConfigPath() (string, error) { + return container.getRootResourcePath("hostconfig.json") +} + +func (container *Container) jsonPath() (string, error) { + return container.getRootResourcePath("config.json") +} + +// This method must be exported to be used from the lxc template +// This directory is only usable when the container is running +func (container *Container) RootfsPath() string { + return container.basefs +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} + +// GetSize, return real size, virtual size +func (container *Container) GetSize() (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + driver = container.daemon.driver + ) + + if err := container.Mount(); err != nil { + log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer container.Unmount() + + initID := 
fmt.Sprintf("%s-init", container.ID) + sizeRw, err = driver.DiffSize(container.ID, initID) + if err != nil { + log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + // measure the rootfs tree only if the base filesystem is actually present + if _, err = os.Stat(container.basefs); err == nil { + if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { + sizeRootfs = -1 + } + } + return sizeRw, sizeRootfs +} + +func (container *Container) Copy(resource string) (io.ReadCloser, error) { + if err := container.Mount(); err != nil { + return nil, err + } + + var filter []string + + basePath, err := container.getResourcePath(resource) + if err != nil { + container.Unmount() + return nil, err + } + + stat, err := os.Stat(basePath) + if err != nil { + container.Unmount() + return nil, err + } + if !stat.IsDir() { + d, f := path.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{path.Base(basePath)} + basePath = path.Dir(basePath) + } + + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + Includes: filter, + }) + if err != nil { + container.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), + nil +} + +// Returns true if the container exposes a certain port +func (container *Container) Exposes(p nat.Port) bool { + _, exists := container.Config.ExposedPorts[p] + return exists +} + +func (container *Container) GetPtyMaster() (*os.File, error) { + ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal) + if !ok { + return nil, ErrNoTTY + } + return ttyConsole.Master(), nil +} + +func (container *Container) HostConfig() *runconfig.HostConfig { + container.Lock() + res := container.hostConfig + container.Unlock() + return res +} + +func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { + container.Lock() + container.hostConfig = hostConfig + container.Unlock() +} + +func (container *Container) DisableLink(name string) { + if container.activeLinks != nil { + if link, exists := container.activeLinks[name]; exists { + link.Disable() + } else { + log.Debugf("Could not find active link for %s", name) + } + } +} + +func (container *Container) setupContainerDns() error { + if container.ResolvConfPath != "" { + return nil + } + + var ( + config = container.hostConfig + daemon = container.daemon + ) + + resolvConf, err := resolvconf.Get() + if err != nil { + return err + } + container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf") + if err != nil { + return err + } + + if config.NetworkMode != "host" { + // check configurations for any container/daemon dns settings + if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 { + var ( + dns = resolvconf.GetNameservers(resolvConf) + dnsSearch = resolvconf.GetSearchDomains(resolvConf) + ) + if len(config.Dns) > 0 { + dns = config.Dns + } else if len(daemon.config.Dns) > 0 { + dns = daemon.config.Dns + } + if len(config.DnsSearch) > 0 { + dnsSearch = config.DnsSearch + } else if len(daemon.config.DnsSearch) > 0 { + dnsSearch = daemon.config.DnsSearch + } + return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) + } + + // replace any localhost/127.* nameservers + resolvConf = utils.RemoveLocalDns(resolvConf) + // if the
resulting resolvConf is empty, use DefaultDns + if !bytes.Contains(resolvConf, []byte("nameserver")) { + log.Infof("No non-localhost DNS resolvers found in resolv.conf, so containers can't use them. Using default external servers: %v", DefaultDns) + // prefix the default dns options with nameserver + resolvConf = append(resolvConf, []byte("\nnameserver "+strings.Join(DefaultDns, "\nnameserver "))...) + } + } + return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644) +} + +func (container *Container) updateParentsHosts() error { + parents, err := container.daemon.Parents(container.Name) + if err != nil { + return err + } + for _, cid := range parents { + if cid == "0" { + continue + } + + c := container.daemon.Get(cid) + if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { + if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, container.Name[1:]); err != nil { + return fmt.Errorf("Failed to update /etc/hosts in parent container: %v", err) + } + } + } + return nil +} + +func (container *Container) initializeNetworking() error { + var err error + if container.hostConfig.NetworkMode.IsHost() { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + + parts := strings.SplitN(container.Config.Hostname, ".", 2) + if len(parts) > 1 { + container.Config.Hostname = parts[0] + container.Config.Domainname = parts[1] + } + + content, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + return container.buildHostnameAndHostsFiles("") + } else if err != nil { + return err + } + + if err := container.buildHostnameFile(); err != nil { + return err + } + + hostsPath, err := container.getRootResourcePath("hosts") + if err != nil { + return err + } + container.HostsPath = hostsPath + + return ioutil.WriteFile(container.HostsPath, content, 0644) + } + if container.hostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := container.getNetworkedContainer() + if err != nil { + return err + } + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + if container.daemon.config.DisableNetwork { + container.Config.NetworkDisabled = true + return container.buildHostnameAndHostsFiles("127.0.1.1") + } + if err := container.AllocateNetwork(); err != nil { + return err + } + return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) +} + +// Make sure the config is compatible with the current kernel +func (container *Container) verifyDaemonSettings() { + if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit { + log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.") + container.Config.Memory = 0 + } + if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit { + log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.") + container.Config.MemorySwap = -1 + } + if container.daemon.sysInfo.IPv4ForwardingDisabled { + log.Infof("WARNING: IPv4 forwarding is disabled. 
Networking will not work") + } +} + +func (container *Container) setupLinkedContainers() ([]string, error) { + var ( + env []string + daemon = container.daemon + ) + children, err := daemon.Children(container.Name) + if err != nil { + return nil, err + } + + if len(children) > 0 { + container.activeLinks = make(map[string]*links.Link, len(children)) + + // If we encounter an error make sure that we rollback any network + // config and ip table changes + rollback := func() { + for _, link := range container.activeLinks { + link.Disable() + } + container.activeLinks = nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + link, err := links.NewLink( + container.NetworkSettings.IPAddress, + child.NetworkSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + daemon.eng) + + if err != nil { + rollback() + return nil, err + } + + container.activeLinks[link.Alias()] = link + if err := link.Enable(); err != nil { + rollback() + return nil, err + } + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + } + return env, nil +} + +func (container *Container) createDaemonEnvironment(linkedEnv []string) []string { + // if a domain name was specified, append it to the hostname (see #7851) + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + // Setup environment + env := []string{ + "PATH=" + DefaultPathEnv, + "HOSTNAME=" + fullHostname, + // Note: we don't set HOME here because it'll get autoset intelligently + // based on the value of USER inside dockerinit, but only if it isn't + // set already (ie, that can be overridden by setting HOME via -e or ENV + // in a Dockerfile). + } + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. 
+ env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + + return env +} + +func (container *Container) setupWorkingDirectory() error { + if container.Config.WorkingDir != "" { + container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) + + pth, err := container.getResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + pthInfo, err := os.Stat(pth) + if err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := os.MkdirAll(pth, 0755); err != nil { + return err + } + } + if pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + } + return nil +} + +func (container *Container) startLoggingToDisk() error { + // Setup logging of stdout and stderr to disk + pth, err := container.logPath("json") + if err != nil { + return err + } + + if err := container.daemon.LogToDisk(container.stdout, pth, "stdout"); err != nil { + return err + } + + if err := container.daemon.LogToDisk(container.stderr, pth, "stderr"); err != nil { + return err + } + + return nil +} + +func (container *Container) waitForStart() error { + container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) + + // block until we either receive an error from the initial start of the container's + // process or until the process is running in the container + select { + case <-container.monitor.startSignal: + case err := <-promise.Go(container.monitor.Start): + return err + } + + return nil +} + +func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error { + binding := bindings[port] + if container.hostConfig.PublishAllPorts && len(binding) == 0 { + binding = append(binding, nat.PortBinding{}) + } + + for i := 0; i < len(binding); i++ { + b := binding[i] + + job := eng.Job("allocate_port", container.ID) + job.Setenv("HostIP", b.HostIp) + job.Setenv("HostPort", b.HostPort) + job.Setenv("Proto", port.Proto()) + job.Setenv("ContainerPort", port.Port()) + + portEnv, err := job.Stdout.AddEnv() + if err != nil { + return err + } + if err := job.Run(); err != nil { + return err + } + b.HostIp = portEnv.Get("HostIP") + b.HostPort = portEnv.Get("HostPort") + + binding[i] = b + } + bindings[port] = binding + return nil +} + +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.hostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +func (container *Container) GetMountLabel() string { + if container.hostConfig.Privileged { + return "" + } + return container.MountLabel +} + +func (container *Container) getNetworkedContainer() (*Container, error) { + parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + case "container": + nc := container.daemon.Get(parts[1]) + if nc == nil { + return nil, fmt.Errorf("no such container to join network: %s", parts[1]) + } + if !nc.IsRunning() { + return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) + } + return nc, nil + default: + return nil, fmt.Errorf("network mode not set to container") + } +} diff --git a/daemon/container_unit_test.go b/daemon/container_unit_test.go new file mode 100644 index 00000000..9599675f --- /dev/null +++ b/daemon/container_unit_test.go @@ -0,0 +1,197 @@ +package daemon + +import ( + "github.com/docker/docker/nat" + "testing" +) + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + 
ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + t.Logf("%v", len(ports)) + t.Logf("%v", bindings) + if len(ports) != 0 { + t.Logf("Expected nil got %s", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %s", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestGetFullName(t *testing.T) { + name, err := GetFullContainerName("testing") + if err != nil { + t.Fatal(err) + } + if name != "/testing" { + t.Fatalf("Expected /testing got %s", name) + } + if _, err := GetFullContainerName(""); err == nil { + t.Fatal("Error should not be 
nil") + } +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} diff --git a/daemon/copy.go b/daemon/copy.go new file mode 100644 index 00000000..9d18b010 --- /dev/null +++ b/daemon/copy.go @@ -0,0 +1,33 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) + } + + var ( + name = job.Args[0] + resource = job.Args[1] + ) + + if container := daemon.Get(name); container != nil { + + data, err := container.Copy(resource) + if err != nil { + return job.Error(err) + } + defer data.Close() + + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/daemon/create.go b/daemon/create.go new file mode 100644 index 00000000..e72b0ef2 --- /dev/null +++ b/daemon/create.go @@ -0,0 +1,101 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { + var name string + if len(job.Args) == 1 { + name = job.Args[0] + } else if len(job.Args) > 1 { + return job.Errorf("Usage: %s", job.Name) + } + config := runconfig.ContainerConfigFromJob(job) + if config.Memory != 0 && config.Memory < 4194304 { + return job.Errorf("Minimum memory limit allowed is 4MB") + } + if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit { + job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") + config.Memory = 0 + } + if config.Memory > 0 && !daemon.SystemConfig().SwapLimit { + job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") + config.MemorySwap = -1 + } + + var hostConfig *runconfig.HostConfig + if job.EnvExists("HostConfig") { + hostConfig = runconfig.ContainerHostConfigFromJob(job) + } else { + // Older versions of the API don't provide a HostConfig. + hostConfig = nil + } + + container, buildWarnings, err := daemon.Create(config, hostConfig, name) + if err != nil { + if daemon.Graph().IsNotExist(err) { + _, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) + } + return job.Error(err) + } + if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { + job.Errorf("IPv4 forwarding is disabled.\n") + } + container.LogEvent("create") + // FIXME: this is necessary because daemon.Create might return a nil container + // with a non-nil error. This should not happen! Once it's fixed we + // can remove this workaround. 
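+ // On success the new container's ID is written to the job's stdout; any warnings collected during creation are emitted on the job's error stream.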
+ if container != nil { + job.Printf("%s\n", container.ID) + } + for _, warning := range buildWarnings { + job.Errorf("%s\n", warning) + } + + return engine.StatusOK +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) { + var ( + container *Container + warnings []string + ) + + img, err := daemon.repositories.LookupImage(config.Image) + if err != nil { + return nil, nil, err + } + if err := img.CheckDepth(); err != nil { + return nil, nil, err + } + if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { + return nil, nil, err + } + if container, err = daemon.newContainer(name, config, img); err != nil { + return nil, nil, err + } + if err := daemon.Register(container); err != nil { + return nil, nil, err + } + if err := daemon.createRootfs(container, img); err != nil { + return nil, nil, err + } + if hostConfig != nil { + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return nil, nil, err + } + } + if err := container.ToDisk(); err != nil { + return nil, nil, err + } + return container, warnings, nil +} diff --git a/daemon/daemon.go b/daemon/daemon.go new file mode 100644 index 00000000..9f90643c --- /dev/null +++ b/daemon/daemon.go @@ -0,0 +1,1126 @@ +package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/libcontainer/label" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/execdrivers" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" + _ "github.com/docker/docker/daemon/networkdriver/bridge" + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/trust" + "github.com/docker/docker/utils" + "github.com/docker/docker/volumes" +) + +var ( + DefaultDns = []string{"8.8.8.8", "8.8.4.4"} + validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) +) + +type contStore struct { + s map[string]*Container + sync.Mutex +} + +func (c *contStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +func (c *contStore) Get(id string) *Container { + c.Lock() + res := c.s[id] + c.Unlock() + return res +} + +func (c *contStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +func (c *contStore) List() []*Container { + containers := new(History) + c.Lock() + for _, cont := range c.s { + containers.Add(cont) + } + c.Unlock() + containers.Sort() + return *containers +} + +type Daemon struct { + repository string + sysInitPath string + containers *contStore + execCommands *execStore + graph 
*graph.Graph + repositories *graph.TagStore + idIndex *truncindex.TruncIndex + sysInfo *sysinfo.SysInfo + volumes *volumes.Repository + eng *engine.Engine + config *Config + containerGraph *graphdb.Database + driver graphdriver.Driver + execDriver execdriver.Driver + trustStore *trust.TrustStore +} + +// Install installs daemon capabilities to eng. +func (daemon *Daemon) Install(eng *engine.Engine) error { + // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ + for name, method := range map[string]engine.Handler{ + "attach": daemon.ContainerAttach, + "commit": daemon.ContainerCommit, + "container_changes": daemon.ContainerChanges, + "container_copy": daemon.ContainerCopy, + "container_inspect": daemon.ContainerInspect, + "containers": daemon.Containers, + "create": daemon.ContainerCreate, + "rm": daemon.ContainerRm, + "export": daemon.ContainerExport, + "info": daemon.CmdInfo, + "kill": daemon.ContainerKill, + "logs": daemon.ContainerLogs, + "pause": daemon.ContainerPause, + "resize": daemon.ContainerResize, + "restart": daemon.ContainerRestart, + "start": daemon.ContainerStart, + "stop": daemon.ContainerStop, + "top": daemon.ContainerTop, + "unpause": daemon.ContainerUnpause, + "wait": daemon.ContainerWait, + "image_delete": daemon.ImageDelete, // FIXME: see above + "execCreate": daemon.ContainerExecCreate, + "execStart": daemon.ContainerExecStart, + "execResize": daemon.ContainerExecResize, + } { + if err := eng.Register(name, method); err != nil { + return err + } + } + if err := daemon.Repositories().Install(eng); err != nil { + return err + } + if err := daemon.trustStore.Install(eng); err != nil { + return err + } + // FIXME: this hack is necessary for legacy integration tests to access + // the daemon object. + eng.Hack_SetGlobalVar("httpapi.daemon", daemon) + return nil +} + +// Get looks for a container by the specified ID or name, and returns it. +// If the container is not found, or if an error occurs, nil is returned. +func (daemon *Daemon) Get(name string) *Container { + if id, err := daemon.idIndex.Get(name); err == nil { + return daemon.containers.Get(id) + } + if c, _ := daemon.GetByName(name); c != nil { + return c + } + return nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + return daemon.Get(id) != nil +} + +func (daemon *Daemon) containerRoot(id string) string { + return path.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. 
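+// load only reads the container's state back from disk; registering it with the daemon is a separate step (see register below).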
+func (daemon *Daemon) load(id string) (*Container, error) { + container := &Container{ + root: daemon.containerRoot(id), + State: NewState(), + execCommands: newExecStore(), + } + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + container.readHostConfig() + + return container, nil +} + +// Register makes a container object usable by the daemon as <container.Name> +// This is a wrapper for register +func (daemon *Daemon) Register(container *Container) error { + return daemon.register(container, true) +} + +// register makes a container object usable by the daemon as <container.Name> +func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { + if container.daemon != nil || daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if err := daemon.ensureName(container); err != nil { + return err + } + + container.daemon = daemon + + // Attach to stdout and stderr + container.stderr = broadcastwriter.New() + container.stdout = broadcastwriter.New() + // Attach to stdin + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } else { + container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin + } + // done + daemon.containers.Add(container.ID, container) + + // don't update the Suffixarray if we're starting up + // we'll waste time if we update it for every container + daemon.idIndex.Add(container.ID) + + // FIXME: if the container is supposed to be running but is not, auto restart it? + // if so, then we need to restart monitor and init a new lock + // If the container is supposed to be running, make sure of it + if container.IsRunning() { + log.Debugf("killing old running container %s", container.ID) + + existingPid := container.Pid + container.SetStopped(0) + + // We only have to handle this for lxc because the other drivers will ensure that + // no processes are left when docker dies + if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { + lxc.KillLxc(container.ID, 9) + } else { + // use the current driver and ensure that the container is dead x.x + cmd := &execdriver.Command{ + ID: container.ID, + } + var err error + cmd.ProcessConfig.Process, err = os.FindProcess(existingPid) + if err != nil { + log.Debugf("cannot find existing process for %d", existingPid) + } + daemon.execDriver.Terminate(cmd) + } + + if err := container.Unmount(); err != nil { + log.Debugf("unmount error %s", err) + } + if err := container.ToDisk(); err != nil { + log.Debugf("saving stopped state to disk %s", err) + } + + info := daemon.execDriver.Info(container.ID) + if !info.IsRunning() { + log.Debugf("Container %s was supposed to be running but is not.", container.ID) + + log.Debugf("Marking as stopped") + + container.SetStopped(-127) + if err := container.ToDisk(); err != nil { + return err + } + } + } + return nil +} + +func (daemon *Daemon) ensureName(container *Container) error { + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDisk(); err != nil { + log.Debugf("Error saving container name %s", err) + } + } + return nil +} + +func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error { + log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) 
+ if err != nil { + return err + } + src.AddWriter(log, stream) + return nil +} + +func (daemon *Daemon) restore() error { + var ( + debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") + containers = make(map[string]*Container) + currentDriver = daemon.driver.String() + ) + + if !debug { + log.Infof("Loading containers: ") + } + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if !debug { + fmt.Print(".") + } + if err != nil { + log.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + log.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + registeredContainers := []*Container{} + + if entities := daemon.containerGraph.List("/", -1); entities != nil { + for _, p := range entities.Paths() { + if !debug { + fmt.Print(".") + } + + e := entities[p] + + if container, ok := containers[e.ID()]; ok { + if err := daemon.register(container, false); err != nil { + log.Debugf("Failed to register container %s: %s", container.ID, err) + } + + registeredContainers = append(registeredContainers, container) + + // delete from the map so that a new name is not automatically generated + delete(containers, e.ID()) + } + } + } + + // Any containers that are left over do not exist in the graph + for _, container := range containers { + // Try to set the default name for a container if it exists prior to links + container.Name, err = daemon.generateNewName(container.ID) + if err != nil { + log.Debugf("Setting default id - %s", err) + } + + if err := daemon.register(container, false); err != nil { + log.Debugf("Failed to register container %s: %s", container.ID, err) + } + + registeredContainers = append(registeredContainers, container) + } + + // check the restart policy on the containers and restart any container with + // the restart policy of "always" + if daemon.config.AutoRestart { + log.Debugf("Restarting containers...") + + for _, container := range registeredContainers { + if container.hostConfig.RestartPolicy.Name == "always" || + (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) { + log.Debugf("Starting container %s", container.ID) + + if err := container.Start(); err != nil { + log.Debugf("Failed to start container %s: %s", container.ID, err) + } + } + } + } + + for _, c := range registeredContainers { + c.registerVolumes() + } + + if !debug { + log.Infof(": done.") + } + + return nil +} + +func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool { + if config != nil { + if config.PortSpecs != nil { + for _, p := range config.PortSpecs { + if strings.Contains(p, ":") { + return true + } + } + } + } + return false +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) { + warnings := []string{} + if daemon.checkDeprecatedExpose(img.Config) || daemon.checkDeprecatedExpose(config) { + warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. 
Use -p to publish the ports.") + } + if img.Config != nil { + if err := runconfig.Merge(config, img.Config); err != nil { + return nil, err + } + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return nil, fmt.Errorf("No command specified") + } + return warnings, nil +} + +func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { + var ( + err error + id = utils.GenerateRandomID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(name) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + + conflictingContainer, err := daemon.GetByName(name) + if err != nil { + if strings.Contains(err.Error(), "Could not find entity") { + return "", err + } + + // Remove name and continue starting the container + if err := daemon.containerGraph.Delete(name); err != nil { + return "", err + } + } else { + nameAsKnownByUser := strings.TrimPrefix(name, "/") + return "", fmt.Errorf( + "Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, + utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) + } + } + return name, nil +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + continue + } + return name, nil + } + + name = "/" + utils.TruncateID(id) + if _, err := daemon.containerGraph.Set(name, id); err != nil { + return "", err + } + return name, nil +} + +func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { + // Generate default hostname + // FIXME: the lxc template no longer needs to set a default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) (string, []string) { + var ( + entrypoint string + args []string + ) + if len(configEntrypoint) != 0 { + entrypoint = configEntrypoint[0] + args = append(configEntrypoint[1:], configCmd...) 
+ } else { + entrypoint = configCmd[0] + args = configCmd[1:] + } + return entrypoint, args +} + +func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + con := strings.SplitN(opt, ":", 2) + if len(con) == 1 { + return fmt.Errorf("Invalid --security-opt: %q", opt) + } + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + default: + return fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) { + var ( + id string + err error + ) + id, name, err = daemon.generateIdAndName(name) + if err != nil { + return nil, err + } + + daemon.generateHostname(id, config) + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + container := &Container{ + // FIXME: we should generate the ID here instead of receiving it as an argument + ID: id, + Created: time.Now().UTC(), + Path: entrypoint, + Args: args, //FIXME: de-duplicate from config + Config: config, + hostConfig: &runconfig.HostConfig{}, + Image: img.ID, // Always use the resolved image id + NetworkSettings: &NetworkSettings{}, + Name: name, + Driver: daemon.driver.String(), + ExecDriver: daemon.execDriver.Name(), + State: NewState(), + execCommands: newExecStore(), + } + container.root = daemon.containerRoot(container.ID) + return container, err +} + +func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error { + // Step 1: create the container directory. + // This doubles as a barrier to avoid race conditions. 
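+ // After the directory is created, an "<id>-init" layer is created on top of the image, populated via graph.SetupInitLayer, and the container's own read-write layer is created on top of that.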
+ if err := os.Mkdir(container.root, 0700); err != nil { + return err + } + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Create(initID, img.ID); err != nil { + return err + } + initPath, err := daemon.driver.Get(initID, "") + if err != nil { + return err + } + defer daemon.driver.Put(initID) + + if err := graph.SetupInitLayer(initPath); err != nil { + return err + } + + if err := daemon.driver.Create(container.ID, initID); err != nil { + return err + } + return nil +} + +func GetFullContainerName(name string) (string, error) { + if name == "" { + return "", fmt.Errorf("Container name cannot be empty") + } + if name[0] != '/' { + name = "/" + name + } + return name, nil +} + +func (daemon *Daemon) GetByName(name string) (*Container, error) { + fullName, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + entity := daemon.containerGraph.Get(fullName) + if entity == nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(entity.ID()) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) + } + return e, nil +} + +func (daemon *Daemon) Children(name string) (map[string]*Container, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + children := make(map[string]*Container) + + err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { + c := daemon.Get(e.ID()) + if c == nil { + return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) + } + children[p] = c + return nil + }, 0) + + if err != nil { + return nil, err + } + return children, nil +} + +func (daemon *Daemon) Parents(name string) ([]string, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + + return daemon.containerGraph.Parents(name) +} + +func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if !daemon.containerGraph.Exists(fullName) { + _, err := daemon.containerGraph.Set(fullName, child.ID) + return err + } + return nil +} + +func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { + if hostConfig != nil && hostConfig.Links != nil { + for _, l := range hostConfig.Links { + parts, err := parsers.PartParser("name:alias", l) + if err != nil { + return err + } + child, err := daemon.GetByName(parts["name"]) + if err != nil { + return err + } + if child == nil { + return fmt.Errorf("Could not get container for %s", parts["name"]) + } + if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + } + return nil +} + +// FIXME: harmonize with NewGraph() +func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { + daemon, err := NewDaemonFromDirectory(config, eng) + if err != nil { + return nil, err + } + return daemon, nil +} + +func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { + // Apply configuration defaults + if config.Mtu == 0 { + // FIXME: GetDefaultNetwork Mtu doesn't need to be public anymore + config.Mtu = GetDefaultNetworkMtu() + } + // Check for mutually incompatible config options + if config.BridgeIface != "" && config.BridgeIP != "" { + return nil, fmt.Errorf("You specified -b & 
--bip, mutually exclusive options. Please specify only one.") + } + if !config.EnableIptables && !config.InterContainerCommunication { + return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") + } + if !config.EnableIptables && config.EnableIpMasq { + config.EnableIpMasq = false + } + config.DisableNetwork = config.BridgeIface == disableNetworkBridge + + // Claim the pidfile first, to avoid any and all unexpected race conditions. + // Some of the init doesn't need a pidfile lock - but let's not try to be smart. + if config.Pidfile != "" { + if err := utils.CreatePidFile(config.Pidfile); err != nil { + return nil, err + } + eng.OnShutdown(func() { + // Always release the pidfile last, just in case + utils.RemovePidFile(config.Pidfile) + }) + } + + // Check that the system is supported and we have sufficient privileges + if runtime.GOOS != "linux" { + return nil, fmt.Errorf("The Docker daemon is only supported on linux") + } + if os.Geteuid() != 0 { + return nil, fmt.Errorf("The Docker daemon needs to be run as root") + } + if err := checkKernelAndArch(); err != nil { + return nil, err + } + + // set up the TempDir to use a canonical path + tmp, err := utils.TempDir(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := utils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + if !config.EnableSelinuxSupport { + selinuxSetDisabled() + } + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = utils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + config.Root = realRoot + // Create the root directory if it doesn't exists + if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // Set the default driver + graphdriver.DefaultDriver = config.GraphDriver + + // Load storage driver + driver, err := graphdriver.New(config.Root, config.GraphOptions) + if err != nil { + return nil, err + } + log.Debugf("Using graph driver %s", driver) + + // As Docker on btrfs and SELinux are incompatible at present, error on both being enabled + if selinuxEnabled() && config.EnableSelinuxSupport && driver.String() == "btrfs" { + return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver!") + } + + daemonRepo := path.Join(config.Root, "containers") + + if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // Migrate the container if it is aufs and aufs is enabled + if err = migrateIfAufs(driver, config.Root); err != nil { + return nil, err + } + + log.Debugf("Creating images graph") + g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) + if err != nil { + return nil, err + } + + volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions) + if err != nil { + return nil, err + } + + volumes, err := volumes.NewRepository(path.Join(config.Root, "volumes"), volumesDriver) + if err != nil { + return nil, err + } + + log.Debugf("Creating repository list") + repositories, err := graph.NewTagStore(path.Join(config.Root, 
"repositories-"+driver.String()), g, config.Mirrors, config.InsecureRegistries) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store: %s", err) + } + + trustDir := path.Join(config.Root, "trust") + if err := os.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + t, err := trust.NewTrustStore(trustDir) + if err != nil { + return nil, fmt.Errorf("could not create trust store: %s", err) + } + + if !config.DisableNetwork { + job := eng.Job("init_networkdriver") + + job.SetenvBool("EnableIptables", config.EnableIptables) + job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) + job.SetenvBool("EnableIpForward", config.EnableIpForward) + job.SetenvBool("EnableIpMasq", config.EnableIpMasq) + job.Setenv("BridgeIface", config.BridgeIface) + job.Setenv("BridgeIP", config.BridgeIP) + job.Setenv("FixedCIDR", config.FixedCIDR) + job.Setenv("DefaultBindingIP", config.DefaultIp.String()) + + if err := job.Run(); err != nil { + return nil, err + } + } + + graphdbPath := path.Join(config.Root, "linkgraph.db") + graph, err := graphdb.NewSqliteConn(graphdbPath) + if err != nil { + return nil, err + } + + localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) + sysInitPath := utils.DockerInitPath(localCopy) + if sysInitPath == "" { + return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.") + } + + if sysInitPath != localCopy { + // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). + if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { + return nil, err + } + if err := os.Chmod(localCopy, 0700); err != nil { + return nil, err + } + sysInitPath = localCopy + } + + sysInfo := sysinfo.New(false) + ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInitPath, sysInfo) + if err != nil { + return nil, err + } + + daemon := &Daemon{ + repository: daemonRepo, + containers: &contStore{s: make(map[string]*Container)}, + execCommands: newExecStore(), + graph: g, + repositories: repositories, + idIndex: truncindex.NewTruncIndex([]string{}), + sysInfo: sysInfo, + volumes: volumes, + config: config, + containerGraph: graph, + driver: driver, + sysInitPath: sysInitPath, + execDriver: ed, + eng: eng, + trustStore: t, + } + if err := daemon.restore(); err != nil { + return nil, err + } + // Setup shutdown handlers + // FIXME: can these shutdown handlers be registered closer to their source? 
+ eng.OnShutdown(func() { + // FIXME: if these cleanup steps can be called concurrently, register + // them as separate handlers to speed up total shutdown time + // FIXME: use engine logging instead of log.Errorf + if err := daemon.shutdown(); err != nil { + log.Errorf("daemon.shutdown(): %s", err) + } + if err := portallocator.ReleaseAll(); err != nil { + log.Errorf("portallocator.ReleaseAll(): %s", err) + } + if err := daemon.driver.Cleanup(); err != nil { + log.Errorf("daemon.driver.Cleanup(): %s", err.Error()) + } + if err := daemon.containerGraph.Close(); err != nil { + log.Errorf("daemon.containerGraph.Close(): %s", err.Error()) + } + }) + + return daemon, nil +} + +func (daemon *Daemon) shutdown() error { + group := sync.WaitGroup{} + log.Debugf("starting clean shutdown of all containers...") + for _, container := range daemon.List() { + c := container + if c.IsRunning() { + log.Debugf("stopping %s", c.ID) + group.Add(1) + + go func() { + defer group.Done() + if err := c.KillSig(15); err != nil { + log.Debugf("kill 15 error for %s - %s", c.ID, err) + } + c.WaitStop(-1 * time.Second) + log.Debugf("container stopped %s", c.ID) + }() + } + } + group.Wait() + + return nil +} + +func (daemon *Daemon) Mount(container *Container) error { + dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) + if err != nil { + return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) + } + if container.basefs == "" { + container.basefs = dir + } else if container.basefs != dir { + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.driver, container.ID, container.basefs, dir) + } + return nil +} + +func (daemon *Daemon) Unmount(container *Container) error { + daemon.driver.Put(container.ID) + return nil +} + +func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Changes(container.ID, initID) +} + +func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Diff(container.ID, initID) +} + +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return daemon.execDriver.Run(c.command, pipes, startCallback) +} + +func (daemon *Daemon) Pause(c *Container) error { + if err := daemon.execDriver.Pause(c.command); err != nil { + return err + } + c.SetPaused() + return nil +} + +func (daemon *Daemon) Unpause(c *Container) error { + if err := daemon.execDriver.Unpause(c.command); err != nil { + return err + } + c.SetUnpaused() + return nil +} + +func (daemon *Daemon) Kill(c *Container, sig int) error { + return daemon.execDriver.Kill(c.command, sig) +} + +// Nuke kills all containers then removes all content +// from the content root, including images, volumes and +// container filesystems. +// Again: this will remove your entire docker daemon! +// FIXME: this is deprecated, and only used in legacy +// tests. Please remove. +func (daemon *Daemon) Nuke() error { + var wg sync.WaitGroup + for _, container := range daemon.List() { + wg.Add(1) + go func(c *Container) { + c.Kill() + wg.Done() + }(container) + } + wg.Wait() + + return os.RemoveAll(daemon.config.Root) +} + +// FIXME: this is a convenience function for integration tests +// which need direct access to daemon.graph. 
+// Once the tests switch to using engine and jobs, this method +// can go away. +func (daemon *Daemon) Graph() *graph.Graph { + return daemon.graph +} + +func (daemon *Daemon) Repositories() *graph.TagStore { + return daemon.repositories +} + +func (daemon *Daemon) Config() *Config { + return daemon.config +} + +func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo { + return daemon.sysInfo +} + +func (daemon *Daemon) SystemInitPath() string { + return daemon.sysInitPath +} + +func (daemon *Daemon) GraphDriver() graphdriver.Driver { + return daemon.driver +} + +func (daemon *Daemon) ExecutionDriver() execdriver.Driver { + return daemon.execDriver +} + +func (daemon *Daemon) ContainerGraph() *graphdb.Database { + return daemon.containerGraph +} + +func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { + // Retrieve all images + images, err := daemon.Graph().Map() + if err != nil { + return nil, err + } + + // Store the tree in a map of map (map[parentId][childId]) + imageMap := make(map[string]map[string]struct{}) + for _, img := range images { + if _, exists := imageMap[img.Parent]; !exists { + imageMap[img.Parent] = make(map[string]struct{}) + } + imageMap[img.Parent][img.ID] = struct{}{} + } + + // Loop on the children of the given image and check the config + var match *image.Image + for elem := range imageMap[imgID] { + img, err := daemon.Graph().Get(elem) + if err != nil { + return nil, err + } + if runconfig.Compare(&img.ContainerConfig, config) { + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil +} + +func checkKernelAndArch() error { + // Check for unsupported architectures + if runtime.GOARCH != "amd64" { + return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) + } + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.8 crashes are clearer. + // For details see http://github.com/docker/docker/issues/407 + if k, err := kernel.GetKernelVersion(); err != nil { + log.Infof("WARNING: %s", err) + } else { + if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) + } + } + } + return nil +} diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go new file mode 100644 index 00000000..a370a4ce --- /dev/null +++ b/daemon/daemon_aufs.go @@ -0,0 +1,22 @@ +// +build !exclude_graphdriver_aufs + +package daemon + +import ( + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/aufs" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/log" +) + +// Given the graphdriver ad, if it is aufs, then migrate it. +// If aufs driver is not built, this func is a noop. 
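// [Editor's sketch] ImageGetCached above is the build-cache lookup: among the
// children of imgID it returns the newest image whose saved ContainerConfig
// matches the requested config. A hypothetical caller (names invented):
func cachedLayerID(d *Daemon, parentID string, cfg *runconfig.Config) (string, bool, error) {
	img, err := d.ImageGetCached(parentID, cfg)
	if err != nil || img == nil {
		return "", false, err // miss: the build step must actually run
	}
	return img.ID, true, nil // hit: the existing layer can be reused
}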
+func migrateIfAufs(driver graphdriver.Driver, root string) error { + if ad, ok := driver.(*aufs.Driver); ok { + log.Debugf("Migrating existing containers") + if err := ad.Migrate(root, graph.SetupInitLayer); err != nil { + return err + } + } + return nil +} diff --git a/daemon/daemon_btrfs.go b/daemon/daemon_btrfs.go new file mode 100644 index 00000000..cd505c35 --- /dev/null +++ b/daemon/daemon_btrfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_btrfs + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/daemon/daemon_devicemapper.go b/daemon/daemon_devicemapper.go new file mode 100644 index 00000000..47775455 --- /dev/null +++ b/daemon/daemon_devicemapper.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_devicemapper + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/daemon/daemon_no_aufs.go b/daemon/daemon_no_aufs.go new file mode 100644 index 00000000..06cdc776 --- /dev/null +++ b/daemon/daemon_no_aufs.go @@ -0,0 +1,11 @@ +// +build exclude_graphdriver_aufs + +package daemon + +import ( + "github.com/docker/docker/daemon/graphdriver" +) + +func migrateIfAufs(driver graphdriver.Driver, root string) error { + return nil +} diff --git a/daemon/daemon_unit_test.go b/daemon/daemon_unit_test.go new file mode 100644 index 00000000..fbc3302a --- /dev/null +++ b/daemon/daemon_unit_test.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" +) + +func TestParseSecurityOpt(t *testing.T) { + container := &Container{} + config := &runconfig.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor:test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test valid label + config.SecurityOpt = []string{"label:user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} diff --git a/daemon/delete.go b/daemon/delete.go new file mode 100644 index 00000000..77be926c --- /dev/null +++ b/daemon/delete.go @@ -0,0 +1,125 @@ +package daemon + +import ( + "fmt" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" +) + +func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + removeVolume := job.GetenvBool("removeVolume") + removeLink := job.GetenvBool("removeLink") + forceRemove := job.GetenvBool("forceRemove") + container := daemon.Get(name) + + if container == nil { + return job.Errorf("No such container: %s", name) + } + + if removeLink { + name, err := GetFullContainerName(name) + if err != nil { + return job.Error(err) + } + parent, n := path.Split(name) + if parent == "/" { + return job.Errorf("Conflict, cannot remove the default name of the container") + } + pe := daemon.ContainerGraph().Get(parent) + if pe == nil { + return job.Errorf("Cannot get parent %s for name %s", parent, name) + } + parentContainer := daemon.Get(pe.ID()) + + if parentContainer != nil { + parentContainer.DisableLink(n) + } + + if err := daemon.ContainerGraph().Delete(name); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + + if container.IsRunning() { + if forceRemove { + if err := container.Kill(); err != nil { + return job.Errorf("Could not kill running container, cannot remove - %v", err) + } + } else { + return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f") + } + } + if err := daemon.Destroy(container); err != nil { + return job.Errorf("Cannot destroy container %s: %s", name, err) + } + container.LogEvent("destroy") + if removeVolume { + daemon.DeleteVolumes(container.VolumePaths()) + } + return engine.StatusOK +} + +func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) { + for id := range volumeIDs { + if err := daemon.volumes.Delete(id); err != nil { + log.Infof("%s", err) + } + } +}
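// [Editor's sketch] ContainerRm above is installed as an engine handler; a
// caller drives it through a job, passing the CLI flags as env bools. The job
// name "rm" is an assumption here (the registration happens elsewhere in the
// daemon wiring):
func removeContainerSketch(eng *engine.Engine, name string, force, volumes bool) error {
	job := eng.Job("rm", name)
	job.SetenvBool("forceRemove", force)    // -f
	job.SetenvBool("removeVolume", volumes) // -v
	job.SetenvBool("removeLink", false)
	return job.Run() // non-nil error if the handler returned a failure status
}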
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem. +// FIXME: rename to Rm for consistency with the CLI command +func (daemon *Daemon) Destroy(container *Container) error { + if container == nil { + return fmt.Errorf("The given container is <nil>") + } + + element := daemon.containers.Get(container.ID) + if element == nil { + return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) + } + + if err := container.Stop(3); err != nil { + return err + } + + // Deregister the container before removing its directory, to avoid race conditions + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + container.derefVolumes() + if _, err := daemon.containerGraph.Purge(container.ID); err != nil { + log.Debugf("Unable to remove container from link graph: %s", err) + } + + if err := daemon.driver.Remove(container.ID); err != nil { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) + } + + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Remove(initID); err != nil { + return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) + } + + if err := os.RemoveAll(container.root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + if err := daemon.execDriver.Clean(container.ID); err != nil { + return fmt.Errorf("Unable to remove execdriver data for %s: %s", container.ID, err) + } + + selinuxFreeLxcContexts(container.ProcessLabel) + + return nil +} diff --git a/daemon/exec.go b/daemon/exec.go new file mode 100644 index 00000000..0ab1c0bf --- /dev/null +++ b/daemon/exec.go @@ -0,0 +1,301 @@ +// +build linux + +package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +type execConfig struct { + sync.Mutex + ID string + Running bool + ProcessConfig execdriver.ProcessConfig + StreamConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + Container *Container +} + +type execStore struct { + s map[string]*execConfig + sync.Mutex +} + +func newExecStore() *execStore { + return &execStore{s: make(map[string]*execConfig)} +} + +func (e *execStore) Add(id string, execConfig *execConfig) { + e.Lock() + e.s[id] = execConfig + e.Unlock() +} + +func (e *execStore) Get(id string) *execConfig { + e.Lock() + res := e.s[id] + e.Unlock() + return res +} + +func (e *execStore) Delete(id string) { + e.Lock() + delete(e.s, id) + e.Unlock() +} + +func (execConfig *execConfig) Resize(h, w int) error { + return execConfig.ProcessConfig.Terminal.Resize(h, w) +} + +func (d *Daemon) registerExecCommand(execConfig *execConfig) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + execConfig.Container.execCommands.Add(execConfig.ID, execConfig) + // Storing execs in daemon for easy access via remote API.
+ d.execCommands.Add(execConfig.ID, execConfig) +} + +func (d *Daemon) getExecConfig(name string) (*execConfig, error) { + if execConfig := d.execCommands.Get(name); execConfig != nil { + if !execConfig.Container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID) + } + return execConfig, nil + } + + return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name) +} + +func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { + execConfig.Container.execCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*Container, error) { + container := d.Get(name) + + if container == nil { + return nil, fmt.Errorf("No such container: %s", name) + } + + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", name) + } + + return container, nil +} + +func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s [options] container command [args]", job.Name) + } + + if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) { + return job.Error(lxc.ErrExec) + } + + var name = job.Args[0] + + container, err := d.getActiveContainer(name) + if err != nil { + return job.Error(err) + } + + config := runconfig.ExecConfigFromJob(job) + + entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) + + processConfig := execdriver.ProcessConfig{ + Privileged: config.Privileged, + User: config.User, + Tty: config.Tty, + Entrypoint: entrypoint, + Arguments: args, + } + + execConfig := &execConfig{ + ID: utils.GenerateRandomID(), + OpenStdin: config.AttachStdin, + OpenStdout: config.AttachStdout, + OpenStderr: config.AttachStderr, + StreamConfig: StreamConfig{}, + ProcessConfig: processConfig, + Container: container, + Running: false, + } + + d.registerExecCommand(execConfig) + + job.Printf("%s\n", execConfig.ID) + + return engine.StatusOK +} + +func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s [options] exec", job.Name) + } + + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + cStdinCloser io.Closer + execName = job.Args[0] + ) + + execConfig, err := d.getExecConfig(execName) + if err != nil { + return job.Error(err) + } + + func() { + execConfig.Lock() + defer execConfig.Unlock() + if execConfig.Running { + err = fmt.Errorf("Error: Exec command %s is already running", execName) + } + execConfig.Running = true + }() + if err != nil { + return job.Error(err) + } + + log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID) + container := execConfig.Container + + if execConfig.OpenStdin { + r, w := io.Pipe() + go func() { + defer w.Close() + io.Copy(w, job.Stdin) + }() + cStdin = r + cStdinCloser = job.Stdin + } + if execConfig.OpenStdout { + cStdout = job.Stdout + } + if execConfig.OpenStderr { + cStderr = job.Stderr + } + + execConfig.StreamConfig.stderr = broadcastwriter.New() + execConfig.StreamConfig.stdout = broadcastwriter.New() + // Attach to stdin + if execConfig.OpenStdin { + execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe() + } else { + execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin + } + + attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr) + + execErr := make(chan error) + + // Remove exec from 
daemon and container. + defer d.unregisterExecCommand(execConfig) + + go func() { + err := container.Exec(execConfig) + if err != nil { + execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err) + } + }() + + select { + case err := <-attachErr: + if err != nil { + return job.Errorf("attach failed with error: %s", err) + } + case err := <-execErr: + return job.Error(err) + } + + return engine.StatusOK +} + +func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback) +} + +func (container *Container) Exec(execConfig *execConfig) error { + container.Lock() + defer container.Unlock() + + waitStart := make(chan struct{}) + + callback := func(processConfig *execdriver.ProcessConfig, pid int) { + if processConfig.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave + // which we close here. + if c, ok := processConfig.Stdout.(io.Closer); ok { + c.Close() + } + } + close(waitStart) + } + + // We use a callback here instead of a goroutine and a chan for + // synchronization purposes + cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) }) + + // Exec should not return until the process is actually running + select { + case <-waitStart: + case err := <-cErr: + return err + } + + return nil +} + +func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error { + var ( + err error + exitCode int + ) + + pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin) + exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback) + if err != nil { + log.Errorf("Error running command in existing container %s: %s", container.ID, err) + } + + log.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode) + if execConfig.OpenStdin { + if err := execConfig.StreamConfig.stdin.Close(); err != nil { + log.Errorf("Error closing stdin while running in %s: %s", container.ID, err) + } + } + if err := execConfig.StreamConfig.stdout.Clean(); err != nil { + log.Errorf("Error closing stdout while running in %s: %s", container.ID, err) + } + if err := execConfig.StreamConfig.stderr.Clean(); err != nil { + log.Errorf("Error closing stderr while running in %s: %s", container.ID, err) + } + if execConfig.ProcessConfig.Terminal != nil { + if err := execConfig.ProcessConfig.Terminal.Close(); err != nil { + log.Errorf("Error closing terminal while running in container %s: %s", container.ID, err) + } + } + + return err +} diff --git a/daemon/execdriver/MAINTAINERS b/daemon/execdriver/MAINTAINERS new file mode 100644 index 00000000..68a97d2f --- /dev/null +++ b/daemon/execdriver/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go new file mode 100644 index 00000000..22e4c464 --- /dev/null +++ b/daemon/execdriver/driver.go @@ -0,0 +1,121 @@ +package execdriver + +import ( + "errors" + "io" + "os" + "os/exec" + + "github.com/docker/libcontainer/devices" +) + +// Context is a generic key value pair that allows +// arbitrary data to be sent +type Context map[string]string +
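// [Editor's note] monitorExec above runs under promise.Go, which (paraphrasing
// the pkg/promise helper, not quoting its source) wraps an error-returning
// function in a goroutine and hands back a buffered channel:
func goPromiseSketch(f func() error) chan error {
	ch := make(chan error, 1) // buffered so the goroutine never blocks on send
	go func() { ch <- f() }()
	return ch
}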
started") + ErrWaitTimeoutReached = errors.New("Wait timeout reached") + ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") + ErrDriverNotFound = errors.New("The requested docker init has not been found") +) + +type StartCallback func(*ProcessConfig, int) + +// Driver specific information based on +// processes registered with the driver +type Info interface { + IsRunning() bool +} + +// Terminal in an interface for drivers to implement +// if they want to support Close and Resize calls from +// the core +type Terminal interface { + io.Closer + Resize(height, width int) error +} + +type TtyTerminal interface { + Master() *os.File +} + +type Driver interface { + Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code + // Exec executes the process in an existing container, blocks until the process exits and returns the exit code + Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) + Kill(c *Command, sig int) error + Pause(c *Command) error + Unpause(c *Command) error + Name() string // Driver name + Info(id string) Info // "temporary" hack (until we move state from core to plugins) + GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. + Terminate(c *Command) error // kill it with fire + Clean(id string) error // clean all traces of container exec +} + +// Network settings of the container +type Network struct { + Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled + Mtu int `json:"mtu"` + ContainerID string `json:"container_id"` // id of the container to join network. + HostNetworking bool `json:"host_networking"` +} + +type NetworkInterface struct { + Gateway string `json:"gateway"` + IPAddress string `json:"ip"` + IPPrefixLen int `json:"ip_prefix_len"` + MacAddress string `json:"mac_address"` + Bridge string `json:"bridge"` +} + +type Resources struct { + Memory int64 `json:"memory"` + MemorySwap int64 `json:"memory_swap"` + CpuShares int64 `json:"cpu_shares"` + Cpuset string `json:"cpuset"` +} + +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Private bool `json:"private"` + Slave bool `json:"slave"` +} + +// Describes a process that will be run inside a container. 
+// ProcessConfig describes a process that will be run inside a container. +type ProcessConfig struct { + exec.Cmd `json:"-"` + + Privileged bool `json:"privileged"` + User string `json:"user"` + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Terminal Terminal `json:"-"` // standard or tty terminal + Console string `json:"-"` // dev/console path +} + +// Command wraps an os/exec.Cmd to add more metadata +type Command struct { + ID string `json:"id"` + Rootfs string `json:"rootfs"` // root fs of the container + InitPath string `json:"initpath"` // dockerinit + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Network *Network `json:"network"` + Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` + AllowedDevices []*devices.Device `json:"allowed_devices"` + AutoCreatedDevices []*devices.Device `json:"autocreated_devices"` + CapAdd []string `json:"cap_add"` + CapDrop []string `json:"cap_drop"` + ContainerPid int `json:"container_pid"` // the pid for the process inside a container + ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container. + ProcessLabel string `json:"process_label"` + MountLabel string `json:"mount_label"` + LxcConfig []string `json:"lxc_config"` + AppArmorProfile string `json:"apparmor_profile"` +} diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers.go new file mode 100644 index 00000000..2a050b48 --- /dev/null +++ b/daemon/execdriver/execdrivers/execdrivers.go @@ -0,0 +1,23 @@ +package execdrivers + +import ( + "fmt" + "path" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/pkg/sysinfo" +) + +func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { + switch name { + case "lxc": + // we want to give the lxc driver the full docker root because it needs + // to access and write config and template files in /var/lib/docker/containers/* + // to be backwards compatible + return lxc.NewDriver(root, initPath, sysInfo.AppArmor) + case "native": + return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) + } + return nil, fmt.Errorf("unknown exec driver %s", name) +} diff --git a/daemon/execdriver/lxc/MAINTAINERS b/daemon/execdriver/lxc/MAINTAINERS new file mode 100644 index 00000000..e9753be6 --- /dev/null +++ b/daemon/execdriver/lxc/MAINTAINERS @@ -0,0 +1 @@ +Dinesh Subhraveti (@dineshs-altiscale) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go new file mode 100644 index 00000000..0809b05c --- /dev/null +++ b/daemon/execdriver/lxc/driver.go @@ -0,0 +1,527 @@ +package lxc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/kr/pty" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/mount/nodes" +) + +const DriverName = "lxc" + +var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver") + +type driver struct { + root string // root path for the driver to use + initPath string + apparmor bool + sharedRoot bool +} + +func
NewDriver(root, initPath string, apparmor bool) (*driver, error) { + // setup unconfined symlink + if err := linkLxcStart(root); err != nil { + return nil, err + } + + return &driver{ + apparmor: apparmor, + root: root, + initPath: initPath, + sharedRoot: rootIsShared(), + }, nil +} + +func (d *driver) Name() string { + version := d.version() + return fmt.Sprintf("%s-%s", DriverName, version) +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + var ( + term execdriver.Terminal + err error + ) + + if c.ProcessConfig.Tty { + term, err = NewTtyConsole(&c.ProcessConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) + } + c.ProcessConfig.Terminal = term + + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: d.initPath, + Destination: c.InitPath, + Writable: false, + Private: true, + }) + + if err := d.generateEnvConfig(c); err != nil { + return -1, err + } + configPath, err := d.generateLXCConfig(c) + if err != nil { + return -1, err + } + params := []string{ + "lxc-start", + "-n", c.ID, + "-f", configPath, + "--", + c.InitPath, + } + + if c.Network.Interface != nil { + params = append(params, + "-g", c.Network.Interface.Gateway, + "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), + ) + } + params = append(params, + "-mtu", strconv.Itoa(c.Network.Mtu), + ) + + if c.ProcessConfig.User != "" { + params = append(params, "-u", c.ProcessConfig.User) + } + + if c.ProcessConfig.Privileged { + if d.apparmor { + params[0] = path.Join(d.root, "lxc-start-unconfined") + + } + params = append(params, "-privileged") + } + + if c.WorkingDir != "" { + params = append(params, "-w", c.WorkingDir) + } + + if len(c.CapAdd) > 0 { + params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":"))) + } + + if len(c.CapDrop) > 0 { + params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":"))) + } + + params = append(params, "--", c.ProcessConfig.Entrypoint) + params = append(params, c.ProcessConfig.Arguments...) + + if d.sharedRoot { + // lxc-start really needs / to be non-shared, or all kinds of stuff break + // when lxc-start unmount things and those unmounts propagate to the main + // mount namespace. + // What we really want is to clone into a new namespace and then + // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork + // without exec in go we have to do this horrible shell hack... + shellString := + "mount --make-rslave /; exec " + + utils.ShellQuoteArguments(params) + + params = []string{ + "unshare", "-m", "--", "/bin/sh", "-c", shellString, + } + } + + var ( + name = params[0] + arg = params[1:] + ) + aname, err := exec.LookPath(name) + if err != nil { + aname = name + } + c.ProcessConfig.Path = aname + c.ProcessConfig.Args = append([]string{name}, arg...) 
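// [Editor's note] Recap of the sharedRoot branch above: when "/" is a shared
// mount, lxc-start's unmounts would propagate back to the host, so the whole
// invocation is rewritten to first make "/" a recursive slave mount:
//
//	unshare -m -- /bin/sh -c 'mount --make-rslave /; exec <quoted lxc-start argv>'
//
// utils.ShellQuoteArguments is what keeps the exec'd argv intact through sh -c.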
+ + if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { + return -1, err + } + + if err := c.ProcessConfig.Start(); err != nil { + return -1, err + } + + var ( + waitErr error + waitLock = make(chan struct{}) + ) + + go func() { + if err := c.ProcessConfig.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 + waitErr = err + } + } + close(waitLock) + }() + + // Poll lxc for RUNNING status + pid, err := d.waitForStart(c, waitLock) + if err != nil { + if c.ProcessConfig.Process != nil { + c.ProcessConfig.Process.Kill() + c.ProcessConfig.Wait() + } + return -1, err + } + + c.ContainerPid = pid + + if startCallback != nil { + startCallback(&c.ProcessConfig, pid) + } + + <-waitLock + + return getExitCode(c), waitErr +} + +/// Return the exit code of the process +// if the process has not exited -1 will be returned +func getExitCode(c *execdriver.Command) int { + if c.ProcessConfig.ProcessState == nil { + return -1 + } + return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (d *driver) Kill(c *execdriver.Command, sig int) error { + return KillLxc(c.ID, sig) +} + +func (d *driver) Pause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-freeze") + if err == nil { + output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Unpause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-unfreeze") + if err == nil { + output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Terminate(c *execdriver.Command) error { + return KillLxc(c.ID, 9) +} + +func (d *driver) version() string { + var ( + version string + output []byte + err error + ) + if _, errPath := exec.LookPath("lxc-version"); errPath == nil { + output, err = exec.Command("lxc-version").CombinedOutput() + } else { + output, err = exec.Command("lxc-start", "--version").CombinedOutput() + } + if err == nil { + version = strings.TrimSpace(string(output)) + if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { + version = strings.TrimSpace(parts[1]) + } + } + return version +} + +func KillLxc(id string, sig int) error { + var ( + err error + output []byte + ) + _, err = exec.LookPath("lxc-kill") + if err == nil { + output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } else { + output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } + if err != nil { + return fmt.Errorf("Err: %s Output: %s", err, output) + } + return nil +} + +// wait for the process to start and return the pid for the process +func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { + var ( + err error + output []byte + ) + // We wait for the container to be fully running. + // Timeout after 5 seconds. In case of broken pipe, just retry. 
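// [Editor's sketch] The loop that follows is a poll-with-deadline: check the
// lxc state every 50ms for up to 5 seconds, bailing out early if the process
// dies first. Reduced to its essential shape (illustrative only, check() is a
// hypothetical wrapper around the lxc-info call):
//
//	for start := time.Now(); time.Since(start) < 5*time.Second; {
//		select {
//		case <-waitLock:
//			return -1, nil // process exited while we were waiting
//		default:
//		}
//		if running, pid := check(); running {
//			return pid, nil
//		}
//		time.Sleep(50 * time.Millisecond)
//	}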
+ // Note: The container can run and finish correctly before + // the end of this loop + for now := time.Now(); time.Since(now) < 5*time.Second; { + select { + case <-waitLock: + // If the process dies while waiting for it, just return + return -1, nil + default: + } + + output, err = d.getInfo(c.ID) + if err == nil { + info, err := parseLxcInfo(string(output)) + if err != nil { + return -1, err + } + if info.Running { + return info.Pid, nil + } + } + time.Sleep(50 * time.Millisecond) + } + return -1, execdriver.ErrNotRunning +} + +func (d *driver) getInfo(id string) ([]byte, error) { + return exec.Command("lxc-info", "-n", id).CombinedOutput() +} + +type info struct { + ID string + driver *driver +} + +func (i *info) IsRunning() bool { + var running bool + + output, err := i.driver.getInfo(i.ID) + if err != nil { + log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + return false + } + if strings.Contains(string(output), "RUNNING") { + running = true + } + return running +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + // cpu is chosen because it is the only non optional subsystem in cgroups + subsystem := "cpu" + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return pids, err + } + + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + // With more recent lxc versions use, cgroup will be in lxc/ + filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func linkLxcStart(root string) error { + sourcePath, err := exec.LookPath("lxc-start") + if err != nil { + return err + } + targetPath := path.Join(root, "lxc-start-unconfined") + + if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if err := os.Remove(targetPath); err != nil { + return err + } + } + return os.Symlink(sourcePath, targetPath) +} + +// TODO: This can be moved to the mountinfo reader in the mount pkg +func rootIsShared() bool { + if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + cols := strings.Split(line, " ") + if len(cols) >= 6 && cols[4] == "/" { + return strings.HasPrefix(cols[6], "shared") + } + } + } + + // No idea, probably safe to assume so + return true +} + +func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { + root := path.Join(d.root, "containers", c.ID, "config.lxc") + + fo, err := os.Create(root) + if err != nil { + return "", err + } + defer fo.Close() + + if err := LxcTemplateCompiled.Execute(fo, struct { + *execdriver.Command + AppArmor bool + }{ + Command: c, + AppArmor: d.apparmor, + }); err != nil { + return "", err + } + + return root, nil +} + +func (d *driver) generateEnvConfig(c *execdriver.Command) error { + data, err := json.Marshal(c.ProcessConfig.Env) + if err != nil { + return err + } + p := path.Join(d.root, 
"containers", c.ID, "config.env") + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: p, + Destination: "/.dockerenv", + Writable: false, + Private: true, + }) + + return ioutil.WriteFile(p, data, 0600) +} + +// Clean not implemented for lxc +func (d *driver) Clean(id string) error { + return nil +} + +type TtyConsole struct { + MasterPty *os.File + SlavePty *os.File +} + +func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { + // lxc is special in that we cannot create the master outside of the container without + // opening the slave because we have nothing to provide to the cmd. We have to open both then do + // the crazy setup on command right now instead of passing the console path to lxc and telling it + // to open up that console. we save a couple of openfiles in the native driver because we can do + // this. + ptyMaster, ptySlave, err := pty.Open() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + SlavePty: ptySlave, + } + + if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + processConfig.Console = tty.SlavePty.Name() + + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + command.Stdout = t.SlavePty + command.Stderr = t.SlavePty + + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + command.Stdin = t.SlavePty + command.SysProcAttr.Setctty = true + + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + return nil +} + +func (t *TtyConsole) Close() error { + t.SlavePty.Close() + return t.MasterPty.Close() +} + +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return -1, ErrExec +} diff --git a/daemon/execdriver/lxc/info.go b/daemon/execdriver/lxc/info.go new file mode 100644 index 00000000..27b4c586 --- /dev/null +++ b/daemon/execdriver/lxc/info.go @@ -0,0 +1,50 @@ +package lxc + +import ( + "bufio" + "errors" + "strconv" + "strings" +) + +var ( + ErrCannotParse = errors.New("cannot parse raw input") +) + +type lxcInfo struct { + Running bool + Pid int +} + +func parseLxcInfo(raw string) (*lxcInfo, error) { + if raw == "" { + return nil, ErrCannotParse + } + var ( + err error + s = bufio.NewScanner(strings.NewReader(raw)) + info = &lxcInfo{} + ) + for s.Scan() { + text := s.Text() + + if s.Err() != nil { + return nil, s.Err() + } + + parts := strings.Split(text, ":") + if len(parts) < 2 { + continue + } + switch strings.ToLower(strings.TrimSpace(parts[0])) { + case "state": + info.Running = strings.TrimSpace(parts[1]) == "RUNNING" + case "pid": + info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + } + } + return info, nil +} diff --git a/daemon/execdriver/lxc/info_test.go b/daemon/execdriver/lxc/info_test.go new file mode 100644 index 00000000..edafc025 --- /dev/null +++ b/daemon/execdriver/lxc/info_test.go @@ -0,0 +1,36 @@ +package lxc + +import ( + "testing" +) + +func TestParseRunningInfo(t *testing.T) { + raw := ` + state: 
RUNNING + pid: 50` + + info, err := parseLxcInfo(raw) + if err != nil { + t.Fatal(err) + } + if !info.Running { + t.Fatal("info should return a running state") + } + if info.Pid != 50 { + t.Fatalf("info should have pid 50 got %d", info.Pid) + } +} + +func TestEmptyInfo(t *testing.T) { + _, err := parseLxcInfo("") + if err == nil { + t.Fatal("error should not be nil") + } +} + +func TestBadInfo(t *testing.T) { + _, err := parseLxcInfo("state") + if err != nil { + t.Fatal(err) + } +} diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go new file mode 100644 index 00000000..680f53e1 --- /dev/null +++ b/daemon/execdriver/lxc/init.go @@ -0,0 +1,213 @@ +package lxc + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "runtime" + "strings" + "syscall" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libcontainer/netlink" +) + +// InitArgs are the args provided to the init function for a driver +type InitArgs struct { + User string + Gateway string + Ip string + WorkDir string + Privileged bool + Env []string + Args []string + Mtu int + Console string + Pipe int + Root string + CapAdd string + CapDrop string +} + +func init() { + // As always, lxc requires a hack to get this to work + reexec.Register("/.dockerinit", dockerInitializer) +} + +func dockerInitializer() { + initializer() +} + +// initializer is the lxc driver's init function that is run inside the namespace to set up +// additional configuration +func initializer() { + runtime.LockOSThread() + + args := getArgs() + + if err := setupNamespace(args); err != nil { + log.Fatal(err) + } +} + +func setupNamespace(args *InitArgs) error { + if err := setupEnv(args); err != nil { + return err + } + if err := setupHostname(args); err != nil { + return err + } + if err := setupNetworking(args); err != nil { + return err + } + if err := finalizeNamespace(args); err != nil { + return err + } + + path, err := exec.LookPath(args.Args[0]) + if err != nil { + log.Printf("Unable to locate %v", args.Args[0]) + os.Exit(127) + } + + if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { + return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) + } + + return nil +} + +func getArgs() *InitArgs { + var ( + // Get cmdline arguments + user = flag.String("u", "", "username or uid") + gateway = flag.String("g", "", "gateway address") + ip = flag.String("i", "", "ip address") + workDir = flag.String("w", "", "workdir") + privileged = flag.Bool("privileged", false, "privileged mode") + mtu = flag.Int("mtu", 1500, "interface mtu") + capAdd = flag.String("cap-add", "", "capabilities to add") + capDrop = flag.String("cap-drop", "", "capabilities to drop") + ) + + flag.Parse() + + return &InitArgs{ + User: *user, + Gateway: *gateway, + Ip: *ip, + WorkDir: *workDir, + Privileged: *privileged, + Args: flag.Args(), + Mtu: *mtu, + CapAdd: *capAdd, + CapDrop: *capDrop, + } +} + +// Clear environment pollution introduced by lxc-start +func setupEnv(args *InitArgs) error { + // Get env + var env []string + content, err := ioutil.ReadFile(".dockerenv") + if err != nil { + return fmt.Errorf("Unable to load environment variables: %v", err) + } + if err := json.Unmarshal(content, &env); err != nil { + return fmt.Errorf("Unable to unmarshal environment variables: %v", err) + } + // Propagate the plugin-specific container env variable + env = append(env, "container="+os.Getenv("container")) + + args.Env = env + + os.Clearenv() + for _, kv := range args.Env { + parts := strings.SplitN(kv,
"=", 2) + if len(parts) == 1 { + parts = append(parts, "") + } + os.Setenv(parts[0], parts[1]) + } + + return nil +} + +func setupHostname(args *InitArgs) error { + hostname := getEnv(args, "HOSTNAME") + if hostname == "" { + return nil + } + return setHostname(hostname) +} + +// Setup networking +func setupNetworking(args *InitArgs) error { + if args.Ip != "" { + // eth0 + iface, err := net.InterfaceByName("eth0") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + ip, ipNet, err := net.ParseCIDR(args.Ip) + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { + return fmt.Errorf("Unable to set MTU: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + + // loopback + iface, err = net.InterfaceByName("lo") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + if args.Gateway != "" { + gw := net.ParseIP(args.Gateway) + if gw == nil { + return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) + } + + if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + + return nil +} + +// Setup working directory +func setupWorkingDirectory(args *InitArgs) error { + if args.WorkDir == "" { + return nil + } + if err := syscall.Chdir(args.WorkDir); err != nil { + return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) + } + return nil +} + +func getEnv(args *InitArgs, key string) string { + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == key && len(parts) == 2 { + return parts[1] + } + } + return "" +} diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go new file mode 100644 index 00000000..625caa16 --- /dev/null +++ b/daemon/execdriver/lxc/lxc_init_linux.go @@ -0,0 +1,78 @@ +package lxc + +import ( + "fmt" + "strings" + "syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/security/capabilities" + "github.com/docker/libcontainer/system" + "github.com/docker/libcontainer/utils" +) + +func setHostname(hostname string) error { + return syscall.Sethostname([]byte(hostname)) +} + +func finalizeNamespace(args *InitArgs) error { + if err := utils.CloseExecFrom(3); err != nil { + return err + } + + // We use the native drivers default template so that things like caps are consistent + // across both drivers + container := template.New() + + if !args.Privileged { + // drop capabilities in bounding set before changing user + if err := capabilities.DropBoundingSet(container.Capabilities); err != nil { + return fmt.Errorf("drop bounding set %s", err) + } + + // preserve existing capabilities while we change users + if err := system.SetKeepCaps(); err != nil { + return fmt.Errorf("set keep caps %s", err) + } + } + + if err := namespaces.SetupUser(args.User); err != nil { + return fmt.Errorf("setup user %s", err) + } + + if !args.Privileged { + if err := system.ClearKeepCaps(); err != 
nil { + return fmt.Errorf("clear keep caps %s", err) + } + + var ( + adds []string + drops []string + ) + + if args.CapAdd != "" { + adds = strings.Split(args.CapAdd, ":") + } + if args.CapDrop != "" { + drops = strings.Split(args.CapDrop, ":") + } + + caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) + if err != nil { + return err + } + + // drop all other capabilities + if err := capabilities.DropCapabilities(caps); err != nil { + return fmt.Errorf("drop capabilities %s", err) + } + } + + if err := setupWorkingDirectory(args); err != nil { + return err + } + + return nil +} diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go new file mode 100644 index 00000000..b3f2ae68 --- /dev/null +++ b/daemon/execdriver/lxc/lxc_init_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package lxc + +import "github.com/docker/docker/daemon/execdriver" + +func setHostname(hostname string) error { + panic("Not supported on darwin") +} + +func finalizeNamespace(args *execdriver.InitArgs) error { + panic("Not supported on darwin") +} diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go new file mode 100644 index 00000000..2cd63dc7 --- /dev/null +++ b/daemon/execdriver/lxc/lxc_template.go @@ -0,0 +1,151 @@ +package lxc + +import ( + "strings" + "text/template" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/libcontainer/label" +) + +const LxcTemplate = ` +{{if .Network.Interface}} +# network configuration +lxc.network.type = veth +lxc.network.link = {{.Network.Interface.Bridge}} +lxc.network.name = eth0 +lxc.network.mtu = {{.Network.Mtu}} +{{else if .Network.HostNetworking}} +lxc.network.type = none +{{else}} +# network is disabled (-n=false) +lxc.network.type = empty +lxc.network.flags = up +lxc.network.mtu = {{.Network.Mtu}} +{{end}} + +# root filesystem +{{$ROOTFS := .Rootfs}} +lxc.rootfs = {{$ROOTFS}} + +# use a dedicated pts for the container (and limit the number of pseudo terminal +# available) +lxc.pts = 1024 + +# disable the main console +lxc.console = none + +# no controlling tty at all +lxc.tty = 1 + +{{if .ProcessConfig.Privileged}} +lxc.cgroup.devices.allow = a +{{else}} +# no implicit access to devices +lxc.cgroup.devices.deny = a +#Allow the devices passed to us in the AllowedDevices list. +{{range $allowedDevice := .AllowedDevices}} +lxc.cgroup.devices.allow = {{$allowedDevice.GetCgroupAllowString}} +{{end}} +{{end}} + +# standard mount point +# Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 +lxc.pivotdir = lxc_putold + +# NOTICE: These mounts must be applied within the namespace + +# WARNING: mounting procfs and/or sysfs read-write is a known attack vector. +# See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ +# We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only. +# We cannot mount them directly read-only, because that would prevent loading AppArmor profiles. 
+lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 +lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 + +{{if .ProcessConfig.Tty}} +lxc.mount.entry = {{.ProcessConfig.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 +{{end}} + +lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" ""}} 0 0 +lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0 + +{{range $value := .Mounts}} +{{if $value.Writable}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0 +{{else}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0 +{{end}} +{{end}} + +{{if .ProcessConfig.Privileged}} +{{if .AppArmor}} +lxc.aa_profile = unconfined +{{else}} +# Let AppArmor normal confinement take place (i.e., not unconfined) +{{end}} +{{end}} + +# limits +{{if .Resources}} +{{if .Resources.Memory}} +lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} +lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} +{{with $memSwap := getMemorySwap .Resources}} +lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} +{{end}} +{{end}} +{{if .Resources.CpuShares}} +lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} +{{end}} +{{if .Resources.Cpuset}} +lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}} +{{end}} +{{end}} + +{{if .LxcConfig}} +{{range $value := .LxcConfig}} +lxc.{{$value}} +{{end}} +{{end}} +` + +var LxcTemplateCompiled *template.Template + +// Escape spaces in strings according to the fstab documentation, which is the +// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". +func escapeFstabSpaces(field string) string { + return strings.Replace(field, " ", "\\040", -1) +} + +func getMemorySwap(v *execdriver.Resources) int64 { + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. 
+ if v.MemorySwap < 0 { + return 0 + } + return v.Memory * 2 +} + +func getLabel(c map[string][]string, name string) string { + label := c["label"] + for _, l := range label { + parts := strings.SplitN(l, "=", 2) + if strings.TrimSpace(parts[0]) == name { + return strings.TrimSpace(parts[1]) + } + } + return "" +} + +func init() { + var err error + funcMap := template.FuncMap{ + "getMemorySwap": getMemorySwap, + "escapeFstabSpaces": escapeFstabSpaces, + "formatMountLabel": label.FormatMountLabel, + } + LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) + if err != nil { + panic(err) + } +} diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go new file mode 100644 index 00000000..900700b7 --- /dev/null +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -0,0 +1,142 @@ +// +build linux + +package lxc + +import ( + "bufio" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/libcontainer/devices" +) + +func TestLXCConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestLXCConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + // Memory is allocated randomly for testing + rand.Seed(time.Now().UTC().UnixNano()) + var ( + memMin = 33554432 + memMax = 536870912 + mem = memMin + rand.Intn(memMax-memMin) + cpuMin = 100 + cpuMax = 10000 + cpu = cpuMin + rand.Intn(cpuMax-cpuMin) + ) + + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + command := &execdriver.Command{ + ID: "1", + Resources: &execdriver.Resources{ + Memory: int64(mem), + CpuShares: int64(cpu), + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + AllowedDevices: make([]*devices.Device, 0), + ProcessConfig: execdriver.ProcessConfig{}, + } + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) + + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) +} + +func TestCustomLxcConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + ProcessConfig: processConfig, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") +} + +func grepFile(t *testing.T, path string, pattern string) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := bufio.NewReader(f) + var ( + line string + ) + err = nil + for err == nil { + line, err = r.ReadString('\n') + if strings.Contains(line, pattern) == true { + return + } + } + t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) +} + +func TestEscapeFstabSpaces(t *testing.T) { + var testInputs = map[string]string{ + " ": "\\040", + "": "", + "/double space": "/double\\040\\040space", + 
"/some long test string": "/some\\040long\\040test\\040string", + "/var/lib/docker": "/var/lib/docker", + " leading": "\\040leading", + "trailing ": "trailing\\040", + } + for in, exp := range testInputs { + if out := escapeFstabSpaces(in); exp != out { + t.Logf("Expected %s got %s", exp, out) + t.Fail() + } + } +} diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go new file mode 100644 index 00000000..492247e4 --- /dev/null +++ b/daemon/execdriver/native/create.go @@ -0,0 +1,183 @@ +// +build linux,cgo + +package native + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/devices" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/security/capabilities" +) + +// createContainer populates and configures the container type with the +// data provided by the execdriver.Command +func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) { + container := template.New() + + container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env) + container.Tty = c.ProcessConfig.Tty + container.User = c.ProcessConfig.User + container.WorkingDir = c.WorkingDir + container.Env = c.ProcessConfig.Env + container.Cgroups.Name = c.ID + container.Cgroups.AllowedDevices = c.AllowedDevices + container.MountConfig.DeviceNodes = c.AutoCreatedDevices + container.RootFs = c.Rootfs + + // check to see if we are running in ramdisk to disable pivot root + container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + container.RestrictSys = true + + if err := d.createNetwork(container, c); err != nil { + return nil, err + } + + if c.ProcessConfig.Privileged { + if err := d.setPrivileged(container); err != nil { + return nil, err + } + } else { + if err := d.setCapabilities(container, c); err != nil { + return nil, err + } + } + + if c.AppArmorProfile != "" { + container.AppArmorProfile = c.AppArmorProfile + } + + if err := d.setupCgroups(container, c); err != nil { + return nil, err + } + + if err := d.setupMounts(container, c); err != nil { + return nil, err + } + + if err := d.setupLabels(container, c); err != nil { + return nil, err + } + + cmds := make(map[string]*exec.Cmd) + d.Lock() + for k, v := range d.activeContainers { + cmds[k] = v.cmd + } + d.Unlock() + + return container, nil +} + +func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error { + if c.Network.HostNetworking { + container.Namespaces["NEWNET"] = false + return nil + } + + container.Networks = []*libcontainer.Network{ + { + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), + Gateway: "localhost", + Type: "loopback", + }, + } + + if c.Network.Interface != nil { + vethNetwork := libcontainer.Network{ + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), + MacAddress: c.Network.Interface.MacAddress, + Gateway: c.Network.Interface.Gateway, + Type: "veth", + Bridge: c.Network.Interface.Bridge, + VethPrefix: "veth", + } + container.Networks = append(container.Networks, &vethNetwork) + } + + if c.Network.ContainerID != "" { + d.Lock() + active := d.activeContainers[c.Network.ContainerID] + d.Unlock() + + if active == nil || active.cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", 
c.Network.ContainerID) + } + cmd := active.cmd + + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") + container.Networks = append(container.Networks, &libcontainer.Network{ + Type: "netns", + NsPath: nspath, + }) + } + + return nil +} + +func (d *driver) setPrivileged(container *libcontainer.Config) (err error) { + container.Capabilities = capabilities.GetAllCapabilities() + container.Cgroups.AllowAllDevices = true + + hostDeviceNodes, err := devices.GetHostDeviceNodes() + if err != nil { + return err + } + container.MountConfig.DeviceNodes = hostDeviceNodes + + container.RestrictSys = false + + if apparmor.IsEnabled() { + container.AppArmorProfile = "unconfined" + } + + return nil +} + +func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.Command) (err error) { + container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop) + return err +} + +func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error { + if c.Resources != nil { + container.Cgroups.CpuShares = c.Resources.CpuShares + container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemoryReservation = c.Resources.Memory + container.Cgroups.MemorySwap = c.Resources.MemorySwap + container.Cgroups.CpusetCpus = c.Resources.Cpuset + } + + return nil +} + +func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error { + for _, m := range c.Mounts { + container.MountConfig.Mounts = append(container.MountConfig.Mounts, &mount.Mount{ + Type: "bind", + Source: m.Source, + Destination: m.Destination, + Writable: m.Writable, + Private: m.Private, + Slave: m.Slave, + }) + } + + return nil +} + +func (d *driver) setupLabels(container *libcontainer.Config, c *execdriver.Command) error { + container.ProcessLabel = c.ProcessLabel + container.MountConfig.MountLabel = c.MountLabel + + return nil +} diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go new file mode 100644 index 00000000..3628d7b5 --- /dev/null +++ b/daemon/execdriver/native/driver.go @@ -0,0 +1,311 @@ +// +build linux,cgo + +package native + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/term" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" + consolepkg "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/namespaces" + _ "github.com/docker/libcontainer/namespaces/nsenter" + "github.com/docker/libcontainer/system" +) + +const ( + DriverName = "native" + Version = "0.2" +) + +type activeContainer struct { + container *libcontainer.Config + cmd *exec.Cmd +} + +type driver struct { + root string + initPath string + activeContainers map[string]*activeContainer + sync.Mutex +} + +func NewDriver(root, initPath string) (*driver, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + + // native driver root is at docker_root/execdriver/native. 
Put apparmor at docker_root + if err := apparmor.InstallDefaultProfile(); err != nil { + return nil, err + } + + return &driver{ + root: root, + initPath: initPath, + activeContainers: make(map[string]*activeContainer), + }, nil +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + // take the Command and populate the libcontainer.Config from it + container, err := d.createContainer(c) + if err != nil { + return -1, err + } + + var term execdriver.Terminal + + if c.ProcessConfig.Tty { + term, err = NewTtyConsole(&c.ProcessConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) + } + if err != nil { + return -1, err + } + c.ProcessConfig.Terminal = term + + d.Lock() + d.activeContainers[c.ID] = &activeContainer{ + container: container, + cmd: &c.ProcessConfig.Cmd, + } + d.Unlock() + + var ( + dataPath = filepath.Join(d.root, c.ID) + args = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...) + ) + + if err := d.createContainerRoot(c.ID); err != nil { + return -1, err + } + defer d.cleanContainer(c.ID) + + if err := d.writeContainerFile(container, c.ID); err != nil { + return -1, err + } + + return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { + c.ProcessConfig.Path = d.initPath + c.ProcessConfig.Args = append([]string{ + DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.root, c.ID), + "--", + }, args...) + + // set this to nil so that when we set the clone flags anything else is reset + c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{ + Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), + } + c.ProcessConfig.ExtraFiles = []*os.File{child} + + c.ProcessConfig.Env = container.Env + c.ProcessConfig.Dir = container.RootFs + + return &c.ProcessConfig.Cmd + }, func() { + if startCallback != nil { + c.ContainerPid = c.ProcessConfig.Process.Pid + startCallback(&c.ProcessConfig, c.ContainerPid) + } + }) +} + +func (d *driver) Kill(p *execdriver.Command, sig int) error { + return syscall.Kill(p.ProcessConfig.Process.Pid, syscall.Signal(sig)) +} + +func (d *driver) Pause(c *execdriver.Command) error { + active := d.activeContainers[c.ID] + if active == nil { + return fmt.Errorf("active container for %s does not exist", c.ID) + } + active.container.Cgroups.Freezer = "FROZEN" + if systemd.UseSystemd() { + return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) + } + return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) +} + +func (d *driver) Unpause(c *execdriver.Command) error { + active := d.activeContainers[c.ID] + if active == nil { + return fmt.Errorf("active container for %s does not exist", c.ID) + } + active.container.Cgroups.Freezer = "THAWED" + if systemd.UseSystemd() { + return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) + } + return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) +} + +func (d *driver) Terminate(p *execdriver.Command) error { + // lets check the start time for the process + state, err := libcontainer.GetState(filepath.Join(d.root, p.ID)) + if err != nil { + if !os.IsNotExist(err) { + return err + } + // TODO: Remove this part for version 1.2.0 + // This is added only to ensure smooth 
upgrades from pre 1.1.0 to 1.1.0 + data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start")) + if err != nil { + // if we don't have the data on disk then we can assume the process is gone + // because this is only removed after we know the process has stopped + if os.IsNotExist(err) { + return nil + } + return err + } + state = &libcontainer.State{InitStartTime: string(data)} + } + + currentStartTime, err := system.GetProcessStartTime(p.ProcessConfig.Process.Pid) + if err != nil { + return err + } + + if state.InitStartTime == currentStartTime { + err = syscall.Kill(p.ProcessConfig.Process.Pid, 9) + syscall.Wait4(p.ProcessConfig.Process.Pid, nil, 0, nil) + } + d.cleanContainer(p.ID) + + return err + +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) Name() string { + return fmt.Sprintf("%s-%s", DriverName, Version) +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + d.Lock() + active := d.activeContainers[id] + d.Unlock() + + if active == nil { + return nil, fmt.Errorf("active container for %s does not exist", id) + } + c := active.container.Cgroups + + if systemd.UseSystemd() { + return systemd.GetPids(c) + } + return fs.GetPids(c) +} + +func (d *driver) writeContainerFile(container *libcontainer.Config, id string) error { + data, err := json.Marshal(container) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) +} + +func (d *driver) cleanContainer(id string) error { + d.Lock() + delete(d.activeContainers, id) + d.Unlock() + return os.RemoveAll(filepath.Join(d.root, id, "container.json")) +} + +func (d *driver) createContainerRoot(id string) error { + return os.MkdirAll(filepath.Join(d.root, id), 0655) +} + +func (d *driver) Clean(id string) error { + return os.RemoveAll(filepath.Join(d.root, id)) +} + +func getEnv(key string, env []string) string { + for _, pair := range env { + parts := strings.Split(pair, "=") + if parts[0] == key { + return parts[1] + } + } + return "" +} + +type TtyConsole struct { + MasterPty *os.File +} + +func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { + ptyMaster, console, err := consolepkg.CreateMasterAndConsole() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + } + + if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + processConfig.Console = console + + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + + return nil +} + +func (t *TtyConsole) Close() error { + return t.MasterPty.Close() +} diff --git a/daemon/execdriver/native/driver_unsupported.go b/daemon/execdriver/native/driver_unsupported.go new file mode 100644 index 00000000..97839cf3 --- /dev/null +++ b/daemon/execdriver/native/driver_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package native + +import ( + "fmt" + + 
"github.com/docker/docker/daemon/execdriver" +) + +func NewDriver(root, initPath string) (execdriver.Driver, error) { + return nil, fmt.Errorf("native driver not supported on non-linux") +} diff --git a/daemon/execdriver/native/driver_unsupported_nocgo.go b/daemon/execdriver/native/driver_unsupported_nocgo.go new file mode 100644 index 00000000..2b8e9f81 --- /dev/null +++ b/daemon/execdriver/native/driver_unsupported_nocgo.go @@ -0,0 +1,13 @@ +// +build linux,!cgo + +package native + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" +) + +func NewDriver(root, initPath string) (execdriver.Driver, error) { + return nil, fmt.Errorf("native driver not supported on non-linux") +} diff --git a/daemon/execdriver/native/exec.go b/daemon/execdriver/native/exec.go new file mode 100644 index 00000000..84ad0967 --- /dev/null +++ b/daemon/execdriver/native/exec.go @@ -0,0 +1,70 @@ +// +build linux + +package native + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" +) + +const execCommandName = "nsenter-exec" + +func init() { + reexec.Register(execCommandName, nsenterExec) +} + +func nsenterExec() { + runtime.LockOSThread() + + // User args are passed after '--' in the command line. + userArgs := findUserArgs() + + config, err := loadConfigFromFd() + if err != nil { + log.Fatalf("docker-exec: unable to receive config from sync pipe: %s", err) + } + + if err := namespaces.FinalizeSetns(config, userArgs); err != nil { + log.Fatalf("docker-exec: failed to exec: %s", err) + } +} + +// TODO(vishh): Add support for running in priviledged mode and running as a different user. +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + active := d.activeContainers[c.ID] + if active == nil { + return -1, fmt.Errorf("No active container exists with ID %s", c.ID) + } + state, err := libcontainer.GetState(filepath.Join(d.root, c.ID)) + if err != nil { + return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err) + } + + var term execdriver.Terminal + + if processConfig.Tty { + term, err = NewTtyConsole(processConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(processConfig, pipes) + } + + processConfig.Terminal = term + + args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...) + + return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console, + func(cmd *exec.Cmd) { + if startCallback != nil { + startCallback(&c.ProcessConfig, cmd.Process.Pid) + } + }) +} diff --git a/daemon/execdriver/native/info.go b/daemon/execdriver/native/info.go new file mode 100644 index 00000000..601b97e8 --- /dev/null +++ b/daemon/execdriver/native/info.go @@ -0,0 +1,30 @@ +// +build linux,cgo + +package native + +import ( + "os" + "path/filepath" + + "github.com/docker/libcontainer" +) + +type info struct { + ID string + driver *driver +} + +// IsRunning is determined by looking for the +// pid file for a container. 
If the file exists then the +// container is currently running +func (i *info) IsRunning() bool { + if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil { + return true + } + // TODO: Remove this part for version 1.2.0 + // This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0 + if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil { + return true + } + return false +} diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go new file mode 100644 index 00000000..c1c988d9 --- /dev/null +++ b/daemon/execdriver/native/init.go @@ -0,0 +1,66 @@ +// +build linux + +package native + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/syncpipe" +) + +func init() { + reexec.Register(DriverName, initializer) +} + +func initializer() { + runtime.LockOSThread() + + var ( + pipe = flag.Int("pipe", 0, "sync pipe fd") + console = flag.String("console", "", "console (pty slave) path") + root = flag.String("root", ".", "root path for configuration files") + ) + + flag.Parse() + + var container *libcontainer.Config + f, err := os.Open(filepath.Join(*root, "container.json")) + if err != nil { + writeError(err) + } + + if err := json.NewDecoder(f).Decode(&container); err != nil { + f.Close() + writeError(err) + } + f.Close() + + rootfs, err := os.Getwd() + if err != nil { + writeError(err) + } + + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe)) + if err != nil { + writeError(err) + } + + if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil { + writeError(err) + } + + panic("Unreachable") +} + +func writeError(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go new file mode 100644 index 00000000..be3dd5a5 --- /dev/null +++ b/daemon/execdriver/native/template/default_template.go @@ -0,0 +1,47 @@ +package template + +import ( + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/cgroups" +) + +// New returns the docker default configuration for libcontainer +func New() *libcontainer.Config { + container := &libcontainer.Config{ + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", + "AUDIT_WRITE", + }, + Namespaces: map[string]bool{ + "NEWNS": true, + "NEWUTS": true, + "NEWIPC": true, + "NEWPID": true, + "NEWNET": true, + }, + Cgroups: &cgroups.Cgroup{ + Parent: "docker", + AllowAllDevices: false, + }, + MountConfig: &libcontainer.MountConfig{}, + } + + if apparmor.IsEnabled() { + container.AppArmorProfile = "docker-default" + } + + return container +} diff --git a/daemon/execdriver/native/utils.go b/daemon/execdriver/native/utils.go new file mode 100644 index 00000000..e337cf43 --- /dev/null +++ b/daemon/execdriver/native/utils.go @@ -0,0 +1,35 @@ +// +build linux + +package native + +import ( + "os" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/syncpipe" +) + +func findUserArgs() []string { + for i, a := range os.Args { + if a == "--" { + return os.Args[i+1:] + } + } + return []string{} +} + +// loadConfigFromFd loads a container's 
config from the sync pipe that is provided by +// fd 3 when running a process +func loadConfigFromFd() (*libcontainer.Config, error) { + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) + if err != nil { + return nil, err + } + + var config *libcontainer.Config + if err := syncPipe.ReadFromParent(&config); err != nil { + return nil, err + } + + return config, nil +} diff --git a/daemon/execdriver/pipes.go b/daemon/execdriver/pipes.go new file mode 100644 index 00000000..158219f0 --- /dev/null +++ b/daemon/execdriver/pipes.go @@ -0,0 +1,23 @@ +package execdriver + +import ( + "io" +) + +// Pipes is a wrapper around a container's output for +// stdin, stdout, stderr +type Pipes struct { + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { + p := &Pipes{ + Stdout: stdout, + Stderr: stderr, + } + if useStdin { + p.Stdin = stdin + } + return p +} diff --git a/daemon/execdriver/termconsole.go b/daemon/execdriver/termconsole.go new file mode 100644 index 00000000..4dc18e57 --- /dev/null +++ b/daemon/execdriver/termconsole.go @@ -0,0 +1,46 @@ +package execdriver + +import ( + "io" + "os/exec" +) + +type StdConsole struct { +} + +func NewStdConsole(processConfig *ProcessConfig, pipes *Pipes) (*StdConsole, error) { + std := &StdConsole{} + + if err := std.AttachPipes(&processConfig.Cmd, pipes); err != nil { + return nil, err + } + return std, nil +} + +func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { + command.Stdout = pipes.Stdout + command.Stderr = pipes.Stderr + + if pipes.Stdin != nil { + stdin, err := command.StdinPipe() + if err != nil { + return err + } + + go func() { + defer stdin.Close() + io.Copy(stdin, pipes.Stdin) + }() + } + return nil +} + +func (s *StdConsole) Resize(h, w int) error { + // we do not need to resize a non-tty + return nil +} + +func (s *StdConsole) Close() error { + // nothing to close here + return nil +} diff --git a/daemon/execdriver/utils.go b/daemon/execdriver/utils.go new file mode 100644 index 00000000..37042ef8 --- /dev/null +++ b/daemon/execdriver/utils.go @@ -0,0 +1,63 @@ +package execdriver + +import ( + "fmt" + "strings" + + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/security/capabilities" +) + +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = capabilities.GetAllCapabilities() + ) + + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + if !utils.StringsContainsNoCase(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if utils.StringsContainsNoCase(adds, "all") { + basics = capabilities.GetAllCapabilities() + } + + if !utils.StringsContainsNoCase(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !utils.StringsContainsNoCase(drops, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + if !utils.StringsContainsNoCase(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !utils.StringsContainsNoCase(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } 
+ } + + return newCaps, nil +} diff --git a/daemon/export.go b/daemon/export.go new file mode 100644 index 00000000..bc0f14a3 --- /dev/null +++ b/daemon/export.go @@ -0,0 +1,30 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + data, err := container.Export() + if err != nil { + return job.Errorf("%s: %s", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Errorf("%s: %s", name, err) + } + // FIXME: factor job-specific LogEvent to engine.Job.Run() + container.LogEvent("export") + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 00000000..bbaad08e --- /dev/null +++ b/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,450 @@ +/* + +aufs driver directory structure + +. +├── layers // Metadata of layers +│   ├── 1 +│   ├── 2 +│   └── 3 +├── diff // Content of the layer +│   ├── 1 // Contains layers that need to be mounted for the id +│   ├── 2 +│   └── 3 +└── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "path" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/label" +) + +var ( + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + incompatibleFsMagic = []graphdriver.FsMagic{ + graphdriver.FsMagicBtrfs, + graphdriver.FsMagicAufs, + } +) + +func init() { + graphdriver.Register("aufs", Init) +} + +type Driver struct { + root string + sync.Mutex // Protects concurrent modification to active + active map[string]int +} + +// New returns a new AUFS driver. +// An error is returned if AUFS is not supported. 
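+// A minimal usage sketch (illustrative only; the root path is an assumption for the +// example, not mandated by the code below): +// +// d, err := Init("/var/lib/docker/aufs", nil) +// if err == graphdriver.ErrNotSupported { +// // aufs is unavailable on this kernel; fall back to another graphdriver +// } 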
+func Init(root string, options []string) (graphdriver.Driver, error) { + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + rootdir := path.Dir(root) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, fmt.Errorf("Couldn't stat the root directory: %s", err) + } + + for _, magic := range incompatibleFsMagic { + if graphdriver.FsMagic(buf.Type) == magic { + return nil, graphdriver.ErrIncompatibleFS + } + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + active: make(map[string]int), + } + + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := os.MkdirAll(root, 0755); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := graphdriver.MakePrivate(root); err != nil { + return nil, err + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a Driver) rootPath() string { + return a.root +} + +func (Driver) String() string { + return "aufs" +} + +func (a Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + } +} + +// Exists returns true if the given id is registered with +// this driver +func (a Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// Three folders are created for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string) error { + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + return nil +} + +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { + return err + } + } + return nil +} + +// Unmount and remove the dir information +func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if a.active[id] != 0 { + log.Errorf("Warning: removing active id %s", id) + } + + // Make sure the dir is umounted first + if err := a.unmount(id); err != nil { + return err + } + tmpDirs := []string{ + "mnt", + "diff", + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't 
find it anymore) before doing removal of + the whole tree. + for _, p := range tmpDirs { + + realPath := path.Join(a.rootPath(), p, id) + tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) + if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpPath) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Return the rootfs path for the id +// This will mount the dir at its given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + ids, err := getParentIds(a.rootPath(), id) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + ids = []string{} + } + + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + count := a.active[id] + + // If a dir does not have a parent (no layers) do not try to mount + // just return the diff path to the data + out := path.Join(a.rootPath(), "diff", id) + if len(ids) > 0 { + out = path.Join(a.rootPath(), "mnt", id) + + if count == 0 { + if err := a.mount(id, mountLabel); err != nil { + return "", err + } + } + } + + a.active[id] = count + 1 + + return out, nil +} + +func (a *Driver) Put(id string) { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if count := a.active[id]; count > 1 { + a.active[id] = count - 1 + } else { + ids, _ := getParentIds(a.rootPath(), id) + // We only mounted it if there were any parents + if ids != nil && len(ids) > 0 { + a.unmount(id) + } + delete(a.active, id) + } +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (archive.Archive, error) { + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + }) +} + +func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error { + return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (bytes int64, err error) { + // AUFS doesn't need the parent layer to calculate the diff size. + return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + // AUFS doesn't need the parent id to apply the diff. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. 
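+ // Worked example (ids are illustrative): for a layer "3" whose layers file lists + // parents "2" then "1", getParentLayerPaths returns [<root>/diff/2, <root>/diff/1] + // and archive.Changes compares <root>/diff/3 against that ordered stack. 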
+ layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id, mountLabel string) error { + // If the id is mounted or we get an error return + if mounted, err := a.mounted(id); err != nil || mounted { + return err + } + + var ( + target = path.Join(a.rootPath(), "mnt", id) + rw = path.Join(a.rootPath(), "diff", id) + ) + + layers, err := a.getParentLayerPaths(id) + if err != nil { + return err + } + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return err + } + return nil +} + +func (a *Driver) unmount(id string) error { + if mounted, err := a.mounted(id); err != nil || !mounted { + return err + } + target := path.Join(a.rootPath(), "mnt", id) + return Unmount(target) +} + +func (a *Driver) mounted(id string) (bool, error) { + target := path.Join(a.rootPath(), "mnt", id) + return mountpk.Mounted(target) +} + +// During cleanup aufs needs to unmount all mountpoints +func (a *Driver) Cleanup() error { + ids, err := loadIds(path.Join(a.rootPath(), "layers")) + if err != nil { + return err + } + + for _, id := range ids { + if err := a.unmount(id); err != nil { + log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) + } + } + + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + if err = a.tryMount(ro, rw, target, mountLabel); err != nil { + if err = a.mountRw(rw, target, mountLabel); err != nil { + return + } + + for _, layer := range ro { + data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { + return + } + } + } + return +} + +// Try to mount using the aufs fast path, if this fails then +// append ro layers. 
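+// As a concrete sketch (paths are illustrative), with a single parent layer the data +// string assembled below looks roughly like: +// +// br:<root>/diff/<id>=rw:<root>/diff/<parent>=ro+wh:,xino=/dev/shm/aufs.xino +// +// (the colon before ",xino" is left over from the trailing "=ro+wh:" in roBranches) 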
+func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) { + var ( + rwBranch = fmt.Sprintf("%s=rw", rw) + roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) + data = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel) + ) + return mount("none", target, "aufs", 0, data) +} + +func (a *Driver) mountRw(rw, target, mountLabel string) error { + data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel) + return mount("none", target, "aufs", 0, data) +} + +func rollbackMount(target string, err error) { + if err != nil { + Unmount(target) + } +} diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 00000000..0c940bc9 --- /dev/null +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,703 @@ +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +var ( + tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t *testing.T) graphdriver.Driver { + d, err := Init(dir, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t *testing.T) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p) + } + } +} + +func TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if 
err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + response, err := d.mounted("1") + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueReponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker"); err == nil { + t.Fatalf("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer 
os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 3 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "") + if err != nil { + t.Fatal(err) + 
} + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id name should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[1] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func TestMountMoreThan42Layers(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.Create(current, parent); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Fatal(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Fatal(err) + } + if len(files) != expected { + t.Fatalf("Expected %d got %d", expected, len(files)) + } +} diff --git a/daemon/graphdriver/aufs/dirs.go b/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 00000000..fb9b81ed --- /dev/null +++ 
b/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,46 @@ +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return the ids of all the layers (one metadata file per id) found under root +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} diff --git a/daemon/graphdriver/aufs/migrate.go b/daemon/graphdriver/aufs/migrate.go new file mode 100644 index 00000000..dda7cb73 --- /dev/null +++ b/daemon/graphdriver/aufs/migrate.go @@ -0,0 +1,194 @@ +package aufs + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" +) + +type metadata struct { + ID string `json:"id"` + ParentID string `json:"parent,omitempty"` + Image string `json:"Image,omitempty"` + + parent *metadata +} + +func pathExists(pth string) bool { + if _, err := os.Stat(pth); err != nil { + return false + } + return true +} + +// Migrate existing images and containers from docker < 0.7.x +// +// The format pre 0.7 is for docker to store the metadata and filesystem +// content in the same directory. For the migration to work we need to move Image layer +// data from /var/lib/docker/graph/<id>/layers to the diff of the registered id. +// +// Next we need to migrate the container's rw layer to the diff of the driver. After the +// contents are migrated we need to register the image and container ids with the +// driver. +// +// For the migration we try to move the folder containing the layer files, if that +// fails because the data is currently mounted we will fall back to creating a +// symlink. 
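+// Sketch of the relocations performed below (the /var/lib/docker prefix is only +// assumed here for illustration): +// +// /var/lib/docker/graph/<id>/layer -> <aufs root>/diff/<id> (image layers) +// /var/lib/docker/containers/<id>/rw -> <aufs root>/diff/<id> (container rw layers) 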
+func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { + if pathExists(path.Join(pth, "graph")) { + if err := a.migrateRepositories(pth); err != nil { + return err + } + if err := a.migrateImages(path.Join(pth, "graph")); err != nil { + return err + } + return a.migrateContainers(path.Join(pth, "containers"), setupInit) + } + return nil +} + +func (a *Driver) migrateRepositories(pth string) error { + name := path.Join(pth, "repositories") + if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { + if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { + return err + } + + if !a.Exists(id) { + + metadata, err := loadMetadata(path.Join(pth, id, "config.json")) + if err != nil { + return err + } + + initID := fmt.Sprintf("%s-init", id) + if err := a.Create(initID, metadata.Image); err != nil { + return err + } + + initPath, err := a.Get(initID, "") + if err != nil { + return err + } + // setup init layer + if err := setupInit(initPath); err != nil { + return err + } + + if err := a.Create(id, initID); err != nil { + return err + } + } + } + } + return nil +} + +func (a *Driver) migrateImages(pth string) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + var ( + m = make(map[string]*metadata) + current *metadata + exists bool + ) + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { + if current, exists = m[id]; !exists { + current, err = loadMetadata(path.Join(pth, id, "json")) + if err != nil { + return err + } + m[id] = current + } + } + } + + for _, v := range m { + v.parent = m[v.ParentID] + } + + migrated := make(map[string]bool) + for _, v := range m { + if err := a.migrateImage(v, pth, migrated); err != nil { + return err + } + } + return nil +} + +func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { + if !migrated[m.ID] { + if m.parent != nil { + a.migrateImage(m.parent, pth, migrated) + } + if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { + return err + } + if !a.Exists(m.ID) { + if err := a.Create(m.ID, m.ParentID); err != nil { + return err + } + } + migrated[m.ID] = true + } + return nil +} + +// tryRelocate will try to rename the old path to the new path and if +// the operation fails, it will fall back to a symlink +func tryRelocate(oldPath, newPath string) error { + s, err := os.Lstat(newPath) + if err != nil && !os.IsNotExist(err) { + return err + } + // If the destination is a symlink then we already tried to relocate once before + // and it failed so we delete it and retry the rename + if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { + if err := os.RemoveAll(newPath); err != nil { + return err + } + } + if err := os.Rename(oldPath, newPath); err != nil { + if sErr := os.Symlink(oldPath, newPath); sErr != nil { + return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) + } + } + return nil +} + +func loadMetadata(pth string) (*metadata, error) { + f, err := os.Open(pth) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + out = &metadata{} + dec = 
json.NewDecoder(f) + ) + + if err := dec.Decode(out); err != nil { + return nil, err + } + return out, nil +} diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go new file mode 100644 index 00000000..fa74e05b --- /dev/null +++ b/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,18 @@ +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + log.Errorf("[warning]: couldn't run auplink before unmount: %s", err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/daemon/graphdriver/aufs/mount_linux.go b/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 00000000..c86f1bbd --- /dev/null +++ b/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,9 @@ +package aufs + +import "syscall" + +const MsRemount = syscall.MS_REMOUNT + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/daemon/graphdriver/aufs/mount_unsupported.go b/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 00000000..e291bef3 --- /dev/null +++ b/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package aufs + +import "errors" + +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on darwin") +} diff --git a/daemon/graphdriver/btrfs/MAINTAINERS b/daemon/graphdriver/btrfs/MAINTAINERS new file mode 100644 index 00000000..9e629d5f --- /dev/null +++ b/daemon/graphdriver/btrfs/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 00000000..26102aa1 --- /dev/null +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,225 @@ +// +build linux + +package btrfs + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/mount" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + if err := os.MkdirAll(home, 0700); err != nil { + return nil, err + } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err + } + + driver := &Driver{ + home: home, + } + + return graphdriver.NaiveDiffDriver(driver), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "btrfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func 
subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent, "") + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +func (d *Driver) Put(id string) { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. 
+} + +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirId(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 00000000..cde23ce4 --- /dev/null +++ b/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,28 @@ +package btrfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run. +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown. +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/btrfs/dummy_unsupported.go b/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 00000000..f0708888 --- /dev/null +++ b/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/daemon/graphdriver/devmapper/MAINTAINERS b/daemon/graphdriver/devmapper/MAINTAINERS new file mode 100644 index 00000000..9e629d5f --- /dev/null +++ b/daemon/graphdriver/devmapper/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md new file mode 100644 index 00000000..c4262024 --- /dev/null +++ b/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,156 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. For each devicemapper +graph location (typically `/var/lib/docker/devicemapper`, $graph below) +a thin pool is created based on two block devices, one for data and +one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are `$graph/devicemapper/data` and +`$graph/devicemapper/metadata`. Additional metadata required to map +from docker entities to the corresponding devicemapper volumes is +stored in the `$graph/devicemapper/json` file (encoded as JSON). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the major:minor device number and `19478248` is the +inode number of the $graph directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base` of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +These use the `dm` prefix and are used like `docker -d --storage-opt dm.foo=bar`.
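+ +Several options can be combined on one daemon command line, for example (the sizes shown here are illustrative, not recommendations): + + ``docker -d --storage-opt dm.basesize=20G --storage-opt dm.fs=xfs``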
+ +Here is the list of supported options: + + * `dm.basesize` + + Specifies the size to use when creating the base device, which + limits the size of images and containers. The default value is + 10G. Note that thin devices are inherently "sparse", so a 10G device + which is mostly empty doesn't use 10 GB of space on the + pool. However, the larger the device is, the more space the + filesystem will use on it even when empty. **Warning**: This value affects the + system-wide "base" empty filesystem that may already be + initialized and inherited by pulled images. Typically, a change + to this value will require additional steps to take effect: 1) + stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`. + + Example use: + + ``docker -d --storage-opt dm.basesize=20G`` + + * `dm.loopdatasize` + + Specifies the size to use when creating the loopback file for the + "data" device which is used for the thin pool. The default size is + 100G. Note that the file is sparse, so it will not initially take + up this much space. + + Example use: + + ``docker -d --storage-opt dm.loopdatasize=200G`` + + * `dm.loopmetadatasize` + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size is + 2G. Note that the file is sparse, so it will not initially take + up this much space. + + Example use: + + ``docker -d --storage-opt dm.loopmetadatasize=4G`` + + * `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "ext4". + + Example use: + + ``docker -d --storage-opt dm.fs=xfs`` + + * `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device. + + Example use: + + ``docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"`` + + * `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + ``docker -d --storage-opt dm.mountopt=nodiscard`` + + * `dm.datadev` + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both + datadev and metadatadev should be specified to completely avoid + using the loopback device. + + Example use: + + ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + + * `dm.metadatadev` + + Specifies a custom blockdevice to use for metadata for the thin + pool. + + For best performance the metadata should be on a different spindle + than the data, or even better on an SSD. + + When setting up a new metadata pool, it is required to be valid. This + can be achieved by zeroing the first 4k to indicate empty + metadata, like this: + + ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1`` + + Example use: + + ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + + * `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + ``docker -d --storage-opt dm.blocksize=512K`` + + * `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing + devicemapper devices. This is enabled by default (only) if using + loopback devices and is required to re-sparsify the loopback file + on image/container removal. + + Disabling this on loopback can lead to *much* faster container + removal times, but space used in the /var/lib/docker directory + will not be returned to the system for other use when containers + are removed.
+ + Example use: + + ``docker -d --storage-opt dm.blkdiscard=false`` diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/daemon/graphdriver/devmapper/attach_loopback.go new file mode 100644 index 00000000..9cfa18a4 --- /dev/null +++ b/daemon/graphdriver/devmapper/attach_loopback.go @@ -0,0 +1,129 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + log.Errorf("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + log.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + log.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// attachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func attachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard the error and start looking for a + // loopback from index 0.
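+	// (LOOP_CTL_GET_FREE on /dev/loop-control returns the index of the first + // unused loop device, allocating a fresh one if none is free; see loop(4).)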
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + log.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &LoopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + log.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + log.Errorf("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 00000000..ccaea018 --- /dev/null +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,1253 @@ +// +build linux + +package devmapper + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/units" + "github.com/docker/libcontainer/label" +) + +var ( + DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors +) + +type DevInfo struct { + Hash string `json:"-"` + DeviceId int `json:"device_id"` + Size uint64 `json:"size"` + TransactionId uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + devices *DeviceSet `json:"-"` + + mountCount int `json:"-"` + mountPath string `json:"-"` + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks, all + // per-device locks must be acquired *before* the DeviceSet lock, and + // multiple device locks should be acquired parent before child.
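+	// For example, AddDevice below takes baseInfo.lock before devices.Lock(), + // and Shutdown takes each info.lock before briefly taking devices.Lock(). +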
lock sync.Mutex `json:"-"` +} + +type MetaData struct { + Devices map[string]*DevInfo `json:"Devices"` + devicesLock sync.Mutex `json:"-"` // Protects all read/writes to Devices map +} + +type DeviceSet struct { + MetaData + sync.Mutex // Protects Devices map and serializes calls into libdevmapper + root string + devicePrefix string + TransactionId uint64 + NewTransactionId uint64 + nextDeviceId int + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string + metadataDevice string + doBlkDiscard bool + thinpBlockSize uint32 +} + +type DiskUsage struct { + Used uint64 + Total uint64 +} + +type Status struct { + PoolName string + DataLoopback string + MetadataLoopback string + Data DiskUsage + Metadata DiskUsage + SectorSize uint64 +} + +type DevStatus struct { + DeviceId int + Size uint64 + TransactionId uint64 + SizeInSectors uint64 + MappedSectors uint64 + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *DevInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *DevInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *DevInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + return devices.devicePrefix + "-pool" +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of <size> bytes at the path +// <root>/devicemapper/<name>. +// If the file already exists, it does nothing. +// Either way it returns the full path.
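+// For example, initDevmapper below calls ensureImage("data", devices.dataLoopbackSize), +// which yields <root>/devicemapper/data, a 100 GB sparse file by default.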
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) { + return "", err + } + + if _, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + log.Debugf("Creating loopback file %s for devicemapper use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err = file.Truncate(size); err != nil { + return "", err + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionId() uint64 { + devices.NewTransactionId = devices.NewTransactionId + 1 + return devices.NewTransactionId +} + +func (devices *DeviceSet) removeMetadata(info *DevInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("Error encoding metadata to JSON: %s", err) + } + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + if devices.NewTransactionId != devices.TransactionId { + if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + return fmt.Errorf("Error setting devmapper transaction ID: %s", err) + } + devices.TransactionId = devices.NewTransactionId + } + return nil +} + +func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { + devices.devicesLock.Lock() + defer devices.devicesLock.Unlock() + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { + log.Debugf("registerDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + Size: size, + TransactionId: devices.allocateTransactionId(), + Initialized: false, + devices: devices, + } + + devices.devicesLock.Lock() + devices.Devices[hash] = info + devices.devicesLock.Unlock() + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { + log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) + + if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return
activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) +} + +func (devices *DeviceSet) createFilesystem(info *DevInfo) error { + devname := info.DevName() + + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + var err error + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem) + } + if err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) initMetaData() error { + _, _, _, params, err := getStatus(devices.getPoolName()) + if err != nil { + return err + } + + if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { + return err + } + devices.NewTransactionId = devices.TransactionId + + // Migrate the old metadata file + + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := MetaData{Devices: make(map[string]*DevInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + + // If the transaction id is larger than the actual one, we lost the device due to some crash + if info.TransactionId <= devices.TransactionId { + devices.saveMetadata(info) + } + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { + info := &DevInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + // If the transaction id is larger than the actual one, we lost the device due to some crash + if info.TransactionId > devices.TransactionId { + return nil + } + + return info +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDevice("") + if oldInfo != nil && oldInfo.Initialized { + return nil + } + + if oldInfo != nil && !oldInfo.Initialized { + log.Debugf("Removing uninitialized base image") + if err := devices.deleteDevice(oldInfo); err != nil { + return err + } + } + + log.Debugf("Initializing base device-mapper snapshot") + + id := devices.nextDeviceId + + // Create initial device + if err := createDevice(devices.getPoolDevName(), &id); err != nil { + return err + } + + // IDs are 24-bit, so wrap around + devices.nextDeviceId = (id + 1) & 0xffffff + + log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) + info, err := devices.registerDevice(id, "", devices.baseFsSize) + if err != nil { + _ = deleteDevice(devices.getPoolDevName(), id) + return err + } + + log.Debugf("Creating filesystem on base device-mapper snapshot") + + if err = devices.activateDeviceIfNeeded(info); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err =
devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { + if level >= 7 { + return // Ignore _LOG_DEBUG + } + + log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("Can't shrink file") + } + + dataloopback := FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := LoopbackSetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := suspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("Unable to reload pool: %s", err) + } + + // Resume the pool + if err := resumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + logInit(devices) + + _, err := getDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) + } + sysSt := st.Sys().(*syscall.Stat_t) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. 
+ // docker-maj,min[-inode] stands for: + // - Managed by docker + // - The target of this device is at major <maj> and minor <min> + // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + log.Debugf("Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the device <prefix>-pool + log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) + info, err := getInfo(devices.getPoolName()) + if info == nil { + log.Debugf("Error device getInfo: %s", err) + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, + // so we add this bad hack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in <root>/devicemapper/data and + // <root>/devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if info.Exists == 0 { + log.Debugf("Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + log.Debugf("Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = attachLoopDevice(data) + if err != nil { + return err + } + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + log.Debugf("Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = attachLoopDevice(metadata) + if err != nil { + return err + } + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err = devices.initMetaData(); err != nil { + return err + } + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + log.Debugf("Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + baseInfo, err := devices.lookupDevice(baseHash) + if err != nil { + return err + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("device %s already exists", hash) + } + + deviceId := devices.nextDeviceId + + if err :=
createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + log.Debugf("Error creating snap device: %s", err) + return err + } + + // IDs are 24-bit, so wrap around + devices.nextDeviceId = (deviceId + 1) & 0xffffff + + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { + deleteDevice(devices.getPoolDevName(), deviceId) + log.Debugf("Error registering device: %s", err) + return err + } + return nil +} + +func (devices *DeviceSet) deleteDevice(info *DevInfo) error { + if devices.doBlkDiscard { + // This is a workaround for the kernel not discarding blocks + // on the thin pool when we remove a thinp device, so we do it + // manually + if err := devices.activateDeviceIfNeeded(info); err == nil { + if err := BlockDeviceDiscard(info.DevName()); err != nil { + log.Debugf("Error discarding blocks on device: %s (ignoring)", err) + } + } + } + + devinfo, _ := getInfo(info.Name()) + if devinfo != nil && devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + log.Debugf("Error removing device: %s", err) + return err + } + } + + if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { + log.Debugf("Error deleting device: %s", err) + return err + } + + devices.allocateTransactionId() + devices.devicesLock.Lock() + delete(devices.Devices, info.Hash) + devices.devicesLock.Unlock() + + if err := devices.removeMetadata(info); err != nil { + devices.devicesLock.Lock() + devices.Devices[info.Hash] = info + devices.devicesLock.Unlock() + log.Debugf("Error removing metadata: %s", err) + return err + } + + return nil +} + +func (devices *DeviceSet) DeleteDevice(hash string) error { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info) +} + +func (devices *DeviceSet) deactivatePool() error { + log.Debugf("[devmapper] deactivatePool()") + defer log.Debugf("[devmapper] deactivatePool END") + devname := devices.getPoolDevName() + devinfo, err := getInfo(devname) + if err != nil { + return err + } + if devinfo.Exists != 0 { + return removeDevice(devname) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { + log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) + defer log.Debugf("[devmapper] deactivateDevice END") + + // Wait for the unmount to be effective, + // by watching the value of Info.OpenCount for the device + if err := devices.waitClose(info); err != nil { + log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) + } + + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + return err + } + } + + return nil +} + +// Issues the underlying dm remove operation and then waits +// for it to finish. +func (devices *DeviceSet) removeDeviceAndWait(devname string) error { + var err error + + for i := 0; i < 1000; i++ { + err = removeDevice(devname) + if err == nil { + break + } + if err != ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit and retry a few times.
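+		// Only the global DeviceSet lock is dropped while sleeping; the caller + // still holds the per-device lock (see the WARNING on DevInfo above).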
+ devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if err != nil { + return err + } + + if err := devices.waitRemove(devname); err != nil { + return err + } + return nil +} + +// waitRemove blocks until either: +// a) the device registered at <device_set_prefix>-<hash> is removed, +// or b) the 10 second timeout expires. +func (devices *DeviceSet) waitRemove(devname string) error { + log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) + defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) + i := 0 + for ; i < 1000; i++ { + devinfo, err := getInfo(devname) + if err != nil { + // If there is an error we assume the device doesn't exist. + // The error might actually be something else, but we can't differentiate. + return nil + } + if i%100 == 0 { + log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) + } + if devinfo.Exists == 0 { + break + } + + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) + } + return nil +} + +// waitClose blocks until either: +// a) the device registered at <device_set_prefix>-<hash> is closed, +// or b) the 10 second timeout expires. +func (devices *DeviceSet) waitClose(info *DevInfo) error { + i := 0 + for ; i < 1000; i++ { + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if i%100 == 0 { + log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) + } + if devinfo.OpenCount == 0 { + break + } + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash) + } + return nil +} + +func (devices *DeviceSet) Shutdown() error { + + log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) + log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) + defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) + + var devs []*DevInfo + + devices.devicesLock.Lock() + for _, info := range devices.Devices { + devs = append(devs, info) + } + devices.devicesLock.Unlock() + + for _, info := range devs { + info.lock.Lock() + if info.mountCount > 0 { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies.
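+			// (MNT_DETACH performs a lazy unmount: the mount point is detached + // immediately and the filesystem is cleaned up once its last user + // goes away; see umount2(2).)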
+ if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { + log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err) + } + + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + log.Debugf("Shutdown deactivate %s, error: %s", info.Hash, err) + } + devices.Unlock() + } + info.lock.Unlock() + } + + info, _ := devices.lookupDevice("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + log.Debugf("Shutdown deactivate base, error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if err := devices.deactivatePool(); err != nil { + log.Debugf("Shutdown deactivate pool, error: %s", err) + } + devices.Unlock() + + return nil +} + +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount > 0 { + if path != info.mountPath { + return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) + } + + info.mountCount++ + return nil + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + var flags uintptr = syscall.MS_MGC_VAL + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same filesystem UUID + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options)) + if err != nil && err == syscall.EINVAL { + err = syscall.Mount(info.DevName(), path, fstype, flags, options) + } + if err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + info.mountCount = 1 + info.mountPath = path + + return nil +} + +func (devices *DeviceSet) UnmountDevice(hash string) error { + log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) + defer log.Debugf("[devmapper] UnmountDevice END") + + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount == 0 { + return fmt.Errorf("UnmountDevice: device not mounted, id %s", hash) + } + + info.mountCount-- + if info.mountCount > 0 { + return nil + } + + log.Debugf("[devmapper] Unmount(%s)", info.mountPath) + if err := syscall.Unmount(info.mountPath, 0); err != nil { + return err + } + log.Debugf("[devmapper] Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + info.mountPath = "" + + return nil +} + +func (devices *DeviceSet) HasDevice(hash string) bool { + devices.Lock() + defer devices.Unlock() + + info, _ := devices.lookupDevice(hash) + return info != nil +} + +func (devices *DeviceSet) HasActivatedDevice(hash string) bool { + info, _ := devices.lookupDevice(hash) + if info == nil { + return false + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + devinfo, _ := getInfo(info.Name()) + return devinfo != nil && devinfo.Exists != 0 +} + +func (devices *DeviceSet) List() []string { +
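// Snapshot the device hashes under the devices lock so the caller gets a + // consistent view of the Devices map. +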
devices.Lock() + defer devices.Unlock() + + devices.devicesLock.Lock() + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + devices.devicesLock.Unlock() + + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = getStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDevice(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceId: info.DeviceId, + Size: info.Size, + TransactionId: info.TransactionId, + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { + return nil, err + } else { + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + } + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + if len(devices.dataDevice) > 0 { + status.DataLoopback = devices.dataDevice + } else { + status.DataLoopback = path.Join(devices.loopbackDir(), "data") + } + if len(devices.metadataDevice) > 0 { + status.MetadataLoopback = devices.metadataDevice + } else { + status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") + } + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + + status.SectorSize = blockSizeInSectors * 512 + } + + return status +} + +func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { + SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + dataLoopbackSize: DefaultDataLoopbackSize, + metaDataLoopbackSize: DefaultMetaDataLoopbackSize, + baseFsSize: DefaultBaseFsSize, + filesystem: "ext4", + doBlkDiscard: true, + thinpBlockSize: DefaultThinpBlockSize, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, 
err + } + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("Unsupported filesystem %s", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + default: + return nil, fmt.Errorf("Unknown option %s", key) + } + } + + // By default, don't do the blkdiscard hack on raw devices; it's rarely useful and is expensive + if !foundBlkDiscard && devices.dataDevice != "" { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go new file mode 100644 index 00000000..d09e7407 --- /dev/null +++ b/daemon/graphdriver/devmapper/devmapper.go @@ -0,0 +1,646 @@ +// +build linux + +package devmapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +type DevmapperLogger interface { + log(level int, file string, line int, dmError int, message string) +} + +const ( + DeviceCreate TaskType = iota + DeviceReload + DeviceRemove + DeviceRemoveAll + DeviceSuspend + DeviceResume + DeviceInfo + DeviceDeps + DeviceRename + DeviceVersion + DeviceStatus + DeviceTable + DeviceWaitevent + DeviceList + DeviceClear + DeviceMknodes + DeviceListVersions + DeviceTargetMsg + DeviceSetGeometry +) + +const ( + AddNodeOnResume AddNodeType = iota + AddNodeOnCreate +) + +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrAttachLoopbackDevice = errors.New("loopback mounting failed") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") + ErrRunRemoveDevice = errors.New("running removeDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") +
ErrLoopbackSetCapacity = errors.New("Unable to set loopback capacity") + ErrBusy = errors.New("Device is busy") + + dmSawBusy bool + dmSawExist bool +) + +type ( + Task struct { + unmanaged *CDmTask + } + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + } + TaskType int + AddNodeType int +) + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) Run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) SetName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) SetMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) SetSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) SetCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) SetAddNode(addNode AddNodeType) error { + if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) SetRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) AddTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) GetInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) GetDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, &params), + start, length, targetType, params +} + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + log.Errorf("Error getting loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +func LoopbackSetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + log.Errorf("Error loopbackSetCapacity: %s", err) + return ErrLoopbackSetCapacity + } + return nil +} + +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path :=
fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist; + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} + +func UdevWait(cookie uint) error { + if res := DmUdevWait(cookie); res != 1 { + log.Debugf("Failed to wait on udev cookie %d", cookie) + return ErrUdevWait + } + return nil +} + +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +func logInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + log.Debugf("Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// Useful helper for cleanup +func RemoveDevice(name string) error { + task := TaskCreate(DeviceRemove) + if task == nil { + return ErrCreateRemoveTask + } + if err := task.SetName(name); err != nil { + log.Debugf("Can't set task name %s", name) + return err + } + if err := task.Run(); err != nil { + return ErrRunRemoveDevice + } + return nil +} + +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + log.Errorf("Error getting block device size: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this, the remove of the device that happens after the + // discard sometimes fails with EBUSY.
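+	// (syscall.Sync flushes all dirty pages system-wide; heavyweight, but a + // simple way to ensure no buffered writes keep the device busy.)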
+ syscall.Sync() + + return nil +} + +// This is the programmatic example of "dmsetup create" +func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (createPool) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate %s", err) + } + + return nil +} + +func createTask(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +func getInfo(name string) (*Info, error) { + task, err := createTask(DeviceInfo, name) + if task == nil { + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetInfo() +} + +func getDriverVersion() (string, error) { + task := TaskCreate(DeviceVersion) + if task == nil { + return "", fmt.Errorf("Can't create DeviceVersion task") + } + if err := task.Run(); err != nil { + return "", err + } + return task.GetDriverVersion() +} + +func getStatus(name string) (uint64, uint64, string, string, error) { + task, err := createTask(DeviceStatus, name) + if task == nil { + log.Debugf("getStatus: Error createTask: %s", err) + return 0, 0, "", "", err + } + if err := task.Run(); err != nil { + log.Debugf("getStatus: Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.GetInfo() + if err != nil { + log.Debugf("getStatus: Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + log.Debugf("getStatus: Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) + } + + _, start, length, targetType, params := task.GetNextTarget(0) + return start, length, targetType, params, nil +} + +func setTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err 
!= nil { + return fmt.Errorf("Error running setTransactionId %s", err) + } + return nil +} + +func suspendDevice(name string) error { + task, err := createTask(DeviceSuspend, name) + if task == nil { + return err + } + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceSuspend %s", err) + } + return nil +} + +func resumeDevice(name string) error { + task, err := createTask(DeviceResume, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceResume %s", err) + } + + UdevWait(cookie) + + return nil +} + +func createDevice(poolName string, deviceId *int) error { + log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + return fmt.Errorf("Error running createDevice %s", err) + } + break + } + return nil +} + +func deleteDevice(poolName string, deviceId int) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running deleteDevice %s", err) + } + return nil +} + +func removeDevice(name string) error { + log.Debugf("[devmapper] removeDevice START") + defer log.Debugf("[devmapper] removeDevice END") + task, err := createTask(DeviceRemove, name) + if task == nil { + return err + } + dmSawBusy = false + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running removeDevice %s", err) + } + return nil +} + +func activateDevice(poolName string, name string, deviceId int, size uint64) error { + task, err := createTask(DeviceCreate, name) + if task == nil { + return err + } + + params := fmt.Sprintf("%s %d", poolName, deviceId) + if err := task.AddTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + if err := task.SetAddNode(AddNodeOnCreate); err != nil { + return fmt.Errorf("Can't add node %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { + devinfo, _ := getInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := suspendDevice(baseName); err != nil { + return err + } + } + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + resumeDevice(baseName) + } + return err + } + + if err := task.SetSector(0); err != nil { + if 
doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) + } + + break + } + + if doSuspend { + if err := resumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/daemon/graphdriver/devmapper/devmapper_doc.go b/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 00000000..c1c3e389 --- /dev/null +++ b/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognised ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/daemon/graphdriver/devmapper/devmapper_log.go b/daemon/graphdriver/devmapper/devmapper_log.go new file mode 100644 index 00000000..ec7809cc --- /dev/null +++ b/daemon/graphdriver/devmapper/devmapper_log.go @@ -0,0 +1,30 @@ +// +build linux + +package devmapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + } + + if dmLogger != nil { + dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) + } +} diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 00000000..16726199 --- /dev/null +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,37 @@ +// +build linux + +package devmapper + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +func init() { + // Reduce the size of the base fs and loopback for the tests + DefaultDataLoopbackSize = 300 * 1024 * 1024 + DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 + DefaultBaseFsSize = 300 * 1024 * 1024 +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) }
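Every helper above drives libdevmapper through the same task lifecycle: create a task of the appropriate type with createTask, configure it (SetSector, SetMessage, SetCookie, AddTarget), then Run it. As a minimal same-package sketch of that pattern (not part of this patch): the helper name below is hypothetical, though "reserve_metadata_snap" is a genuine dm thin-pool target message.

func reserveMetadataSnap(poolName string) error {
	// Target messages go through a DeviceTargetMsg task, exactly as in
	// createDevice and deleteDevice above.
	task, err := createTask(DeviceTargetMsg, poolName)
	if task == nil {
		return err
	}
	if err := task.SetSector(0); err != nil {
		return fmt.Errorf("Can't set sector %s", err)
	}
	// "reserve_metadata_snap" asks the pool to snapshot its metadata device.
	if err := task.SetMessage("reserve_metadata_snap"); err != nil {
		return fmt.Errorf("Can't set message %s", err)
	}
	if err := task.Run(); err != nil {
		return fmt.Errorf("Error running reserveMetadataSnap %s", err)
	}
	return nil
}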
diff --git a/daemon/graphdriver/devmapper/devmapper_wrapper.go b/daemon/graphdriver/devmapper/devmapper_wrapper.go new file mode 100644 index 00000000..bd1c6fd5 --- /dev/null +++ b/daemon/graphdriver/devmapper/devmapper_wrapper.go @@ -0,0 +1,240 @@ +// +build linux + +package devmapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it? +#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ import "C" + +import ( + "unsafe" +) + +type ( + CDmTask C.struct_dm_task + + CLoopInfo64 C.struct_loop_info64 + LoopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncrypt_type uint32 + loEncrypt_key_size uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 + } +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD + + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) + +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + LogWithErrnoInit = logWithErrnoInitFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *CDmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *CDmTask { + return (*CDmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *CDmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *CDmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *CDmTask, message string) int { + Cmessage := 
C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *CDmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *CDmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetInfoFct(task *CDmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *CDmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) + return uintptr(nextp) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 00000000..8f9de85d --- /dev/null +++ b/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,151 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/units" +) + +func init() { + 
graphdriver.Register("devicemapper", Init) +} + +// Placeholder interfaces, to be replaced +// at integration. + +// End of placeholder interfaces. + +type Driver struct { + *DeviceSet + home string +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options) + if err != nil { + return nil, err + } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + } + + return graphdriver.NaiveDiffDriver(d), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(int64(s.SectorSize)))}, + {"Data file", s.DataLoopback}, + {"Metadata file", s.MetadataLoopback}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Total)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))}, + } + if vStr, err := GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown() + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +func (d *Driver) Create(id, parent string) error { + if err := d.DeviceSet.AddDevice(id, parent); err != nil { + return err + } + + return nil +} + +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + + // Create the target directories if they don't exist + if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + return "", err + } + + rootFs := path.Join(mp, "rootfs") + if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) { + d.DeviceSet.UnmountDevice(id) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.DeviceSet.UnmountDevice(id) + return "", err + } + } + + return rootFs, nil +} + +func (d *Driver) Put(id string) { + if err := d.DeviceSet.UnmountDevice(id); err != nil { + log.Errorf("Warning: error unmounting device %s: %s", id, err) + } +} + +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/daemon/graphdriver/devmapper/ioctl.go b/daemon/graphdriver/devmapper/ioctl.go new file mode 100644 index 00000000..29caab06 --- /dev/null 
+++ b/daemon/graphdriver/devmapper/ioctl.go @@ -0,0 +1,72 @@ +// +build linux + +package devmapper + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { + loopInfo := &LoopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 00000000..f64e9957 --- /dev/null +++ b/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,86 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. 
+ +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + if uint64(l) != maxLen { + return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go new file mode 100644 index 00000000..91040db9 --- /dev/null +++ b/daemon/graphdriver/driver.go @@ -0,0 +1,156 @@ +package graphdriver + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/mount" +) + +type FsMagic uint64 + +const ( + FsMagicBtrfs = FsMagic(0x9123683E) + FsMagicAufs = FsMagic(0x61756673) +) + +type InitFunc func(root string, options []string) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which chooses not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // Create creates a new, empty, filesystem layer with the + // specified id and parent. Parent may be "". + Create(id, parent string) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g., unmounting the layered filesystem. + Put(id string) + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// Driver is the interface for layered/snapshot file system drivers. 
+type Driver interface { + ProtoDriver + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (archive.Archive, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (bytes int64, err error) +} + +var ( + DefaultDriver string + // All registered drivers + drivers map[string]InitFunc + // Slice of drivers that should be used in order + priority = []string{ + "aufs", + "btrfs", + "devicemapper", + "vfs", + } + + ErrNotSupported = errors.New("driver not supported") + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +func init() { + drivers = make(map[string]InitFunc) +} + +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +func GetDriver(name, home string, options []string) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(path.Join(home, name), options) + } + return nil, ErrNotSupported +} + +func New(root string, options []string) (driver Driver, err error) { + for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { + if name != "" { + return GetDriver(name, root, options) + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err = GetDriver(name, root, options) + if err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for _, initFunc := range drivers { + if driver, err = initFunc(root, options); err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +func MakePrivate(mountPoint string) error { + mounted, err := mount.Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + + return mount.ForceMount("", mountPoint, "none", "private") +} diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go new file mode 100644 index 00000000..14e27d60 --- /dev/null +++ b/daemon/graphdriver/fsdiff.go @@ -0,0 +1,166 @@ +package graphdriver + +import ( + "fmt" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" +) + +// naiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which 
it may or may not +// support on its own. See the comment on the exported +// NaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type naiveDiffDriver struct { + ProtoDriver +} + +// NaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) +// DiffSize(id, parent string) (bytes int64, err error) +func NaiveDiffDriver(driver ProtoDriver) Driver { + return &naiveDiffDriver{ProtoDriver: driver} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. 
+ layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + start := time.Now().UTC() + log.Debugf("Start untar layer") + if err = chrootarchive.ApplyLayer(layerFs, diff); err != nil { + return + } + log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + if parent == "" { + return utils.TreeSize(layerFs) + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + err = fmt.Errorf("Driver %s failed to get image parent %s: %s", driver, parent, err) + return + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return + } + + return archive.ChangesSize(layerFs, changes), nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *naiveDiffDriver) DiffSize(id, parent string) (bytes int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go new file mode 100644 index 00000000..6407e120 --- /dev/null +++ b/daemon/graphdriver/graphtest/graphtest.go @@ -0,0 +1,229 @@ +package graphtest + +import ( + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +var ( + drv *Driver +) + +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t *testing.T, name string) *Driver { + root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, root, nil) + if err != nil { + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t *testing.T, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +func GetDriver(t *testing.T, name string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name) + } else { + drv.refCount++ + } + return drv +} + +func PutDriver(t *testing.T) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := 
fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s not owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } + +} + +// Creates a new image and verifies it is empty and has the right metadata +func DriverTestCreateEmpty(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + if err := driver.Create("empty", ""); err != nil { + t.Fatal(err) + } + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") + + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + +} + +func createBase(t *testing.T, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.Create(name, ""); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} + +func DriverTestCreateBase(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + verifyBase(t, driver, "Base") + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} + +func DriverTestCreateSnap(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + + if err := driver.Create("Snap", "Base"); err != nil { + t.Fatal(err) + } + + verifyBase(t, driver, "Snap") + + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go new file mode 100644 index 00000000..aa104500 --- /dev/null +++ b/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,104 @@ +package vfs + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/libcontainer/label" +) + +func init() { + graphdriver.Register("vfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + } + return graphdriver.NaiveDiffDriver(d), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "vfs" +} + +func (d *Driver) Status() 
[][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func isGNUcoreutils() bool { + if stdout, err := exec.Command("cp", "--version").Output(); err == nil { + return bytes.Contains(stdout, []byte("GNU coreutils")) + } + + return false +} + +func (d *Driver) Create(id, parent string) error { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0755); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.Relabel(dir, mountLabel, "") + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, "dir", path.Base(id)) +} + +func (d *Driver) Remove(id string) error { + if _, err := os.Stat(d.dir(id)); err != nil { + return err + } + return os.RemoveAll(d.dir(id)) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +func (d *Driver) Put(id string) { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 00000000..1ee6ae4a --- /dev/null +++ b/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,35 @@ +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/history.go b/daemon/history.go new file mode 100644 index 00000000..0b125ad2 --- /dev/null +++ b/daemon/history.go @@ -0,0 +1,33 @@ +package daemon + +import ( + "sort" +) + +// History is a convenience type for storing a list of containers, +// ordered by creation date. 
+type History []*Container + +func (history *History) Len() int { + return len(*history) +} + +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +func (history *History) Swap(i, j int) { + containers := *history + tmp := containers[i] + containers[i] = containers[j] + containers[j] = tmp +} + +func (history *History) Add(container *Container) { + *history = append(*history, container) +} + +func (history *History) Sort() { + sort.Sort(history) +} diff --git a/daemon/image_delete.go b/daemon/image_delete.go new file mode 100644 index 00000000..332db7b4 --- /dev/null +++ b/daemon/image_delete.go @@ -0,0 +1,156 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + imgs := engine.NewTable("", 0) + if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil { + return job.Error(err) + } + if len(imgs.Data) == 0 { + return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) + } + if _, err := imgs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// FIXME: make this private and use the job instead +func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.Table, first, force, noprune bool) error { + var ( + repoName, tag string + tags = []string{} + ) + + // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! 
-- shykes + repoName, tag = parsers.ParseRepositoryTag(name) + if tag == "" { + tag = graph.DEFAULTTAG + } + + img, err := daemon.Repositories().LookupImage(name) + if err != nil { + if r, _ := daemon.Repositories().Get(repoName); r != nil { + return fmt.Errorf("No such image: %s:%s", repoName, tag) + } + return fmt.Errorf("No such image: %s", name) + } + + if strings.Contains(img.ID, name) { + repoName = "" + tag = "" + } + + byParents, err := daemon.Graph().ByParent() + if err != nil { + return err + } + + repos := daemon.Repositories().ByID()[img.ID] + + // If deleting by id, see if the id belongs to only one repository + if repoName == "" { + for _, repoAndTag := range repos { + parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag) + if repoName == "" || repoName == parsedRepo { + repoName = parsedRepo + if parsedTag != "" { + tags = append(tags, parsedTag) + } + } else if repoName != parsedRepo && !force { + // the id belongs to multiple repos, like base:latest and user:test; + // in that case, return a conflict + return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) + } + } + } else { + tags = append(tags, tag) + } + + if !first && len(tags) > 0 { + return nil + } + + if len(repos) <= 1 { + if err := daemon.canDeleteImage(img.ID, force); err != nil { + return err + } + } + + // Untag the current image + for _, tag := range tags { + tagDeleted, err := daemon.Repositories().Delete(repoName, tag) + if err != nil { + return err + } + if tagDeleted { + out := &engine.Env{} + out.Set("Untagged", repoName+":"+tag) + imgs.Add(out) + eng.Job("log", "untag", img.ID, "").Run() + } + } + tags = daemon.Repositories().ByID()[img.ID] + if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { + if len(byParents[img.ID]) == 0 { + if err := daemon.Repositories().DeleteAll(img.ID); err != nil { + return err + } + if err := daemon.Graph().Delete(img.ID); err != nil { + return err + } + out := &engine.Env{} + out.Set("Deleted", img.ID) + imgs.Add(out) + eng.Job("log", "delete", img.ID, "").Run() + if img.Parent != "" && !noprune { + err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune) + if first { + return err + } + + } + + } + } + return nil +} + +func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { + for _, container := range daemon.List() { + parent, err := daemon.Repositories().LookupImage(container.Image) + if err != nil { + return err + } + + if err := parent.WalkHistory(func(p *image.Image) error { + if imgID == p.ID { + if container.IsRunning() { + if force { + return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + } + return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + } else if !force { + return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + } + } + return nil + }); err != nil { + return err + } + } + return nil +} diff --git a/daemon/info.go b/daemon/info.go new file mode 100644 index 00000000..3d3c9ba6 --- /dev/null +++ b/daemon/info.go @@ -0,0 +1,74 @@ +package daemon + +import ( + "os" + "runtime" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + 
"github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { + images, _ := daemon.Graph().Map() + var imgcount int + if images == nil { + imgcount = 0 + } else { + imgcount = len(images) + } + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err == nil { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err == nil { + operatingSystem = s + } + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + log.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) + initPath := utils.DockerInitPath("") + if initPath == "" { + // if that fails, we'll just return the path from the daemon + initPath = daemon.SystemInitPath() + } + + cjob := job.Eng.Job("subscribers_count") + env, _ := cjob.Stdout.AddEnv() + if err := cjob.Run(); err != nil { + return job.Error(err) + } + v := &engine.Env{} + v.SetInt("Containers", len(daemon.List())) + v.SetInt("Images", imgcount) + v.Set("Driver", daemon.GraphDriver().String()) + v.SetJson("DriverStatus", daemon.GraphDriver().Status()) + v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit) + v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit) + v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled) + v.SetBool("Debug", os.Getenv("DEBUG") != "") + v.SetInt("NFd", utils.GetTotalUsedFds()) + v.SetInt("NGoroutines", runtime.NumGoroutine()) + v.Set("ExecutionDriver", daemon.ExecutionDriver().Name()) + v.SetInt("NEventsListener", env.GetInt("count")) + v.Set("KernelVersion", kernelVersion) + v.Set("OperatingSystem", operatingSystem) + v.Set("IndexServerAddress", registry.IndexServerAddress()) + v.Set("InitSha1", dockerversion.INITSHA1) + v.Set("InitPath", initPath) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/daemon/inspect.go b/daemon/inspect.go new file mode 100644 index 00000000..cf2ed644 --- /dev/null +++ b/daemon/inspect.go @@ -0,0 +1,67 @@ +package daemon + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + container.Lock() + defer container.Unlock() + if job.GetenvBool("raw") { + b, err := json.Marshal(&struct { + *Container + HostConfig *runconfig.HostConfig + }{container, container.hostConfig}) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetAuto("Created", container.Created) + out.SetJson("Path", container.Path) + out.SetList("Args", container.Args) + out.SetJson("Config", container.Config) + out.SetJson("State", container.State) + out.Set("Image", container.Image) + out.SetJson("NetworkSettings", container.NetworkSettings) + 
out.Set("ResolvConfPath", container.ResolvConfPath) + out.Set("HostnamePath", container.HostnamePath) + out.Set("HostsPath", container.HostsPath) + out.Set("Name", container.Name) + out.Set("Driver", container.Driver) + out.Set("ExecDriver", container.ExecDriver) + out.Set("MountLabel", container.MountLabel) + out.Set("ProcessLabel", container.ProcessLabel) + out.SetJson("Volumes", container.Volumes) + out.SetJson("VolumesRW", container.VolumesRW) + out.SetJson("AppArmorProfile", container.AppArmorProfile) + + if children, err := daemon.Children(container.Name); err == nil { + for linkAlias, child := range children { + container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + } + + out.SetJson("HostConfig", container.hostConfig) + + container.hostConfig.Links = nil + if _, err := out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/daemon/kill.go b/daemon/kill.go new file mode 100644 index 00000000..f5f5897c --- /dev/null +++ b/daemon/kill.go @@ -0,0 +1,59 @@ +package daemon + +import ( + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/signal" +) + +// ContainerKill send signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) + } + var ( + name = job.Args[0] + sig uint64 + err error + ) + + // If we have a signal, look at it. Otherwise, do nothing + if len(job.Args) == 2 && job.Args[1] != "" { + // Check if we passed the signal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sig, err = strconv.ParseUint(job.Args[1], 10, 5) + if err != nil { + // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL") + sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")]) + } + + if sig == 0 { + return job.Errorf("Invalid signal: %s", job.Args[1]) + } + } + + if container := daemon.Get(name); container != nil { + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + if err := container.Kill(); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + container.LogEvent("kill") + } else { + // Otherwise, just send the requested signal + if err := container.KillSig(int(sig)); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + // FIXME: Add event for signals + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} diff --git a/daemon/list.go b/daemon/list.go new file mode 100644 index 00000000..25360b67 --- /dev/null +++ b/daemon/list.go @@ -0,0 +1,159 @@ +package daemon + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/pkg/graphdb" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers/filters" +) + +// List returns an array of all containers registered in the daemon. 
+func (daemon *Daemon) List() []*Container { + return daemon.containers.List() +} + +func (daemon *Daemon) Containers(job *engine.Job) engine.Status { + var ( + foundBefore bool + displayed int + all = job.GetenvBool("all") + since = job.Getenv("since") + before = job.Getenv("before") + n = job.GetenvInt("limit") + size = job.GetenvBool("size") + psFilters filters.Args + filt_exited []int + filt_status []string + ) + outs := engine.NewTable("Created", 0) + + psFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := psFilters["exited"]; ok { + for _, value := range i { + code, err := strconv.Atoi(value) + if err != nil { + return job.Error(err) + } + filt_exited = append(filt_exited, code) + } + } + + filt_status, _ = psFilters["status"] + + names := map[string][]string{} + daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { + names[e.ID()] = append(names[e.ID()], p) + return nil + }, -1) + + var beforeCont, sinceCont *Container + if before != "" { + beforeCont = daemon.Get(before) + if beforeCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", before)) + } + } + + if since != "" { + sinceCont = daemon.Get(since) + if sinceCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", since)) + } + } + + errLast := errors.New("last container") + writeCont := func(container *Container) error { + container.Lock() + defer container.Unlock() + if !container.Running && !all && n <= 0 && since == "" && before == "" { + return nil + } + if before != "" && !foundBefore { + if container.ID == beforeCont.ID { + foundBefore = true + } + return nil + } + if n > 0 && displayed == n { + return errLast + } + if since != "" { + if container.ID == sinceCont.ID { + return errLast + } + } + if len(filt_exited) > 0 && !container.Running { + should_skip := true + for _, code := range filt_exited { + if code == container.ExitCode { + should_skip = false + break + } + } + if should_skip { + return nil + } + } + for _, status := range filt_status { + if container.State.StateString() != strings.ToLower(status) { + return nil + } + } + displayed++ + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetList("Names", names[container.ID]) + out.Set("Image", daemon.Repositories().ImageName(container.Image)) + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString)) + } else { + out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) + } + out.SetInt64("Created", container.Created.Unix()) + out.Set("Status", container.State.String()) + str, err := container.NetworkSettings.PortMappingAPI().ToListString() + if err != nil { + return err + } + out.Set("Ports", str) + if size { + sizeRw, sizeRootFs := container.GetSize() + out.SetInt64("SizeRw", sizeRw) + out.SetInt64("SizeRootFs", sizeRootFs) + } + outs.Add(out) + return nil + } + + for _, container := range daemon.List() { + if err := writeCont(container); err != nil { + if err != errLast { + return job.Error(err) + } + break + } + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/daemon/logs.go b/daemon/logs.go new file mode 100644 index 
00000000..b4df401e --- /dev/null +++ b/daemon/logs.go @@ -0,0 +1,135 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tailfile" + "github.com/docker/docker/pkg/timeutils" +) + +func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + tail = job.Getenv("tail") + follow = job.GetenvBool("follow") + times = job.GetenvBool("timestamps") + lines = -1 + format string + ) + if !(stdout || stderr) { + return job.Errorf("You must choose at least one stream") + } + if times { + format = timeutils.RFC3339NanoFixed + } + if tail == "" { + tail = "all" + } + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + log.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + log.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + log.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + log.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + log.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + log.Errorf("Error reading logs (json): %s", err) + } else { + if tail != "all" { + var err error + lines, err = strconv.Atoi(tail) + if err != nil { + log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err) + lines = -1 + } + } + if lines != 0 { + if lines > 0 { + f := cLog.(*os.File) + ls, err := tailfile.TailFile(f, lines) + if err != nil { + return job.Error(err) + } + tmp := bytes.NewBuffer([]byte{}) + for _, l := range ls { + fmt.Fprintf(tmp, "%s\n", l) + } + cLog = tmp + } + dec := json.NewDecoder(cLog) + l := &jsonlog.JSONLog{} + for { + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + log.Errorf("Error streaming logs: %s", err) + break + } + logLine := l.Log + if times { + logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine) + } + if l.Stream == "stdout" && stdout { + io.WriteString(job.Stdout, logLine) + } + if l.Stream == "stderr" && stderr { + io.WriteString(job.Stderr, logLine) + } + l.Reset() + } + } + } + if follow && container.IsRunning() { + errors := make(chan error, 2) + if stdout { + stdoutPipe := container.StdoutLogPipe() + defer stdoutPipe.Close() + go func() { + errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) + }() + } + if stderr { + stderrPipe := container.StderrLogPipe() + defer stderrPipe.Close() + go func() { + errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format) + }() + } + err := <-errors + if err != nil { + log.Errorf("%s", err) + } + } + return engine.StatusOK +} diff --git a/daemon/monitor.go b/daemon/monitor.go new file mode 100644 index 00000000..b5dd7410 --- /dev/null +++ b/daemon/monitor.go @@ -0,0 +1,309 @@ +package daemon + +import ( + "io" + "os/exec" + "sync" + "time" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/runconfig" 
+)
+
+const defaultTimeIncrement = 100
+
+// containerMonitor monitors the execution of a container's main process.
+// If a restart policy is specified for the container, the monitor will ensure that the
+// process is restarted based on the rules of the policy. When the container is finally stopped,
+// the monitor will reset and clean up any of the container's resources, such as networking
+// allocations and the rootfs.
+type containerMonitor struct {
+	mux sync.Mutex
+
+	// container is the container being monitored
+	container *Container
+
+	// restartPolicy is the current policy being applied to the container monitor
+	restartPolicy runconfig.RestartPolicy
+
+	// failureCount is the number of times the container has failed to
+	// start in a row
+	failureCount int
+
+	// shouldStop signals the monitor that the next time the container exits it is
+	// either because docker or the user asked for the container to be stopped
+	shouldStop bool
+
+	// startSignal is a channel that is closed after the container initially starts
+	startSignal chan struct{}
+
+	// stopChan is used to signal to the monitor whenever there is a wait for the
+	// next restart so that the timeIncrement is not honored and the user is not
+	// left waiting for nothing to happen during this time
+	stopChan chan struct{}
+
+	// timeIncrement is the amount of time to wait between restarts,
+	// in milliseconds
+	timeIncrement int
+
+	// lastStartTime is the time at which the monitor last exec'd the container's process
+	lastStartTime time.Time
+}
+
+// newContainerMonitor returns an initialized containerMonitor for the provided container,
+// honoring the provided restart policy
+func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
+	return &containerMonitor{
+		container:     container,
+		restartPolicy: policy,
+		timeIncrement: defaultTimeIncrement,
+		stopChan:      make(chan struct{}),
+		startSignal:   make(chan struct{}),
+	}
+}
+
+// ExitOnNext signals to the container monitor that it should stop monitoring the container
+// for exits the next time the process dies
+func (m *containerMonitor) ExitOnNext() {
+	m.mux.Lock()
+
+	// we need to protect against a double close of the channel when stop is called
+	// twice or else we will get a panic
+	if !m.shouldStop {
+		m.shouldStop = true
+		close(m.stopChan)
+	}
+
+	m.mux.Unlock()
+}
+
+// Close closes the container's resources such as networking allocations and
+// unmounts the container's root filesystem
+func (m *containerMonitor) Close() error {
+	// Cleanup networking and mounts
+	m.container.cleanup()
+
+	// FIXME: here is a race condition between two RUN instructions in a Dockerfile
+	// because they share the same runconfig and change the image. Must be fixed
+	// in builder/builder.go
+	if err := m.container.toDisk(); err != nil {
+		log.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
+
+		return err
+	}
+
+	return nil
+}
+
+// Start starts the container's process and monitors it according to the restart policy
+func (m *containerMonitor) Start() error {
+	var (
+		err        error
+		exitStatus int
+		// this variable indicates where we are in the execution flow:
+		// before Run or after
+		afterRun bool
+	)
+
+	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
+	defer func() {
+		if afterRun {
+			m.container.Lock()
+			m.container.setStopped(exitStatus)
+			defer m.container.Unlock()
+		}
+		m.Close()
+	}()
+
+	// reset the restart count
+	m.container.RestartCount = -1
+
+	for {
+		m.container.RestartCount++
+
+		if err := m.container.startLoggingToDisk(); err != nil {
+			m.resetContainer(false)
+
+			return err
+		}
+
+		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
+
+		m.container.LogEvent("start")
+
+		m.lastStartTime = time.Now()
+
+		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
+			// if we receive an internal error from the initial start of a container then let's
+			// return it instead of entering the restart loop
+			if m.container.RestartCount == 0 {
+				m.resetContainer(false)
+
+				return err
+			}
+
+			log.Errorf("Error running container: %s", err)
+		}
+
+		// here container.Lock is already lost
+		afterRun = true
+
+		m.resetMonitor(err == nil && exitStatus == 0)
+
+		if m.shouldRestart(exitStatus) {
+			m.container.SetRestarting(exitStatus)
+			m.container.LogEvent("die")
+			m.resetContainer(true)
+
+			// sleep with a small time increment between each restart to help avoid issues caused by quickly
+			// restarting the container because of some types of errors ( networking cut out, etc... )
+			m.waitForNextRestart()
+
+			// we need to check this before reentering the loop because the waitForNextRestart could have
+			// been terminated by a request from a user
+			if m.shouldStop {
+				return err
+			}
+			continue
+		}
+		m.container.LogEvent("die")
+		m.resetContainer(true)
+		return err
+	}
+}
+
+// resetMonitor resets the stateful fields on the containerMonitor based on the
+// previous run's success or failure. Regardless of success, if the container had
+// an execution time of more than 10s then reset the timer back to the default
+func (m *containerMonitor) resetMonitor(successful bool) {
+	executionTime := time.Now().Sub(m.lastStartTime).Seconds()
+
+	if executionTime > 10 {
+		m.timeIncrement = defaultTimeIncrement
+	} else {
+		// otherwise we need to increment the amount of time we wait before restarting
+		// the process. We will build up by multiplying the increment by 2
+		m.timeIncrement *= 2
+	}
+
+	// the container exited successfully so we need to reset the failure counter
+	if successful {
+		m.failureCount = 0
+	} else {
+		m.failureCount++
+	}
+}
+
+// waitForNextRestart waits with the default time increment to restart the container unless
+// a user or docker asks for the container to be stopped
+func (m *containerMonitor) waitForNextRestart() {
+	select {
+	case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond):
+	case <-m.stopChan:
+	}
+}
+
+// shouldRestart checks the restart policy and applies the rules to determine if
+// the container's process should be restarted
+func (m *containerMonitor) shouldRestart(exitStatus int) bool {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	// do not restart if the user or docker has requested that this container be stopped
+	if m.shouldStop {
+		return false
+	}
+
+	switch m.restartPolicy.Name {
+	case "always":
+		return true
+	case "on-failure":
+		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
+		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount >= max {
+			log.Debugf("stopping restart of container %s because maximum failure count of %d has been reached", m.container.ID, max)
+			return false
+		}
+
+		return exitStatus != 0
+	}
+
+	return false
+}
+
+// callback ensures that the container's state is properly updated after we
+// received ack from the execution drivers
+func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int) {
+	if processConfig.Tty {
+		// The callback is called after the process Start()
+		// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+		// which we close here.
+		if c, ok := processConfig.Stdout.(io.Closer); ok {
+			c.Close()
+		}
+	}
+
+	m.container.setRunning(pid)
+
+	// signal that the process has started
+	// close channel only if not closed
+	select {
+	case <-m.startSignal:
+	default:
+		close(m.startSignal)
+	}
+
+	if err := m.container.ToDisk(); err != nil {
+		log.Debugf("%s", err)
+	}
+}
+
+// resetContainer resets the container's IO and ensures that the command is able to be executed again
+// by copying the data into a new struct.
+// If lock is true, the container is locked during the reset.
+func (m *containerMonitor) resetContainer(lock bool) {
+	container := m.container
+	if lock {
+		container.Lock()
+		defer container.Unlock()
+	}
+
+	if container.Config.OpenStdin {
+		if err := container.stdin.Close(); err != nil {
+			log.Errorf("%s: Error closing stdin: %s", container.ID, err)
+		}
+	}
+
+	if err := container.stdout.Clean(); err != nil {
+		log.Errorf("%s: Error cleaning stdout: %s", container.ID, err)
+	}
+
+	if err := container.stderr.Clean(); err != nil {
+		log.Errorf("%s: Error cleaning stderr: %s", container.ID, err)
+	}
+
+	if container.command != nil && container.command.ProcessConfig.Terminal != nil {
+		if err := container.command.ProcessConfig.Terminal.Close(); err != nil {
+			log.Errorf("%s: Error closing terminal: %s", container.ID, err)
+		}
+	}
+
+	// Re-create a brand new stdin pipe once the container exited
+	if container.Config.OpenStdin {
+		container.stdin, container.stdinPipe = io.Pipe()
+	}
+
+	c := container.command.ProcessConfig.Cmd
+
+	container.command.ProcessConfig.Cmd = exec.Cmd{
+		Stdin:       c.Stdin,
+		Stdout:      c.Stdout,
+		Stderr:      c.Stderr,
+		Path:        c.Path,
+		Env:         c.Env,
+		ExtraFiles:  c.ExtraFiles,
+		Args:        c.Args,
+		Dir:         c.Dir,
+		SysProcAttr: c.SysProcAttr,
+	}
+}
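The restart back-off above is easiest to follow with concrete numbers. The sketch below (plain Go, standard library only; the run durations are hypothetical) reproduces the schedule that resetMonitor and waitForNextRestart implement together: the delay starts at 100 ms, doubles after every short-lived run, and snaps back to the default once a run survives longer than ten seconds.

package main

import (
	"fmt"
	"time"
)

func main() {
	const defaultIncrement = 100 * time.Millisecond
	increment := defaultIncrement

	// hypothetical container lifetimes; a run longer than 10s resets the back-off
	runs := []time.Duration{time.Second, 2 * time.Second, 500 * time.Millisecond, 15 * time.Second, time.Second}

	for i, lifetime := range runs {
		if lifetime > 10*time.Second {
			increment = defaultIncrement // stable run: back-off resets
		} else {
			increment *= 2 // short-lived run: wait twice as long next time
		}
		fmt.Printf("run %d lasted %v -> next restart delay %v\n", i+1, lifetime, increment)
	}
}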
diff --git a/daemon/network_settings.go b/daemon/network_settings.go
new file mode 100644
index 00000000..69c15be3
--- /dev/null
+++ b/daemon/network_settings.go
@@ -0,0 +1,43 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
+)
+
+// FIXME: move deprecated port stuff to nat to clean up the core.
+type PortMapping map[string]string // Deprecated
+
+type NetworkSettings struct {
+	IPAddress   string
+	IPPrefixLen int
+	MacAddress  string
+	Gateway     string
+	Bridge      string
+	PortMapping map[string]PortMapping // Deprecated
+	Ports       nat.PortMap
+}
+
+func (settings *NetworkSettings) PortMappingAPI() *engine.Table {
+	var outs = engine.NewTable("", 0)
+	for port, bindings := range settings.Ports {
+		p, _ := nat.ParsePort(port.Port())
+		if len(bindings) == 0 {
+			out := &engine.Env{}
+			out.SetInt("PrivatePort", p)
+			out.Set("Type", port.Proto())
+			outs.Add(out)
+			continue
+		}
+		for _, binding := range bindings {
+			out := &engine.Env{}
+			h, _ := nat.ParsePort(binding.HostPort)
+			out.SetInt("PrivatePort", p)
+			out.SetInt("PublicPort", h)
+			out.Set("Type", port.Proto())
+			out.Set("IP", binding.HostIp)
+			outs.Add(out)
+		}
+	}
+	return outs
+}
diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go
new file mode 100644
index 00000000..e05a2c21
--- /dev/null
+++ b/daemon/networkdriver/bridge/driver.go
@@ -0,0 +1,544 @@
+package bridge
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"strings"
+	"sync"
+
+	"github.com/docker/docker/daemon/networkdriver"
+	"github.com/docker/docker/daemon/networkdriver/ipallocator"
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
+	"github.com/docker/docker/daemon/networkdriver/portmapper"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/iptables"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/libcontainer/netlink"
+)
+
+const (
+	DefaultNetworkBridge     = "docker0"
+	MaxAllocatedPortAttempts = 10
+)
+
+// networkInterface represents the networking stack of a container
+type networkInterface struct {
+	IP           net.IP
+	PortMappings []net.Addr // these are mappings to the host interfaces
+}
+
+type ifaces struct {
+	c map[string]*networkInterface
+	sync.Mutex
+}
+
+func (i *ifaces) Set(key string, n *networkInterface) {
+	i.Lock()
+	i.c[key] = n
+	i.Unlock()
+}
+
+func (i *ifaces) Get(key string) *networkInterface {
+	i.Lock()
+	res := i.c[key]
+	i.Unlock()
+	return res
+}
+
+var (
+	addrs = []string{
+		// Here we don't follow the convention of using the 1st IP of the range for the gateway.
+		// This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
+		// In theory this shouldn't matter - in practice there's bound to be a few scripts relying
+		// on the internal addressing or other stupid things like that.
+		// They shouldn't, but hey, let's not break them unless we really have to.
+ "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 + "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive + "10.1.42.1/16", + "10.42.42.1/16", + "172.16.42.1/24", + "172.16.43.1/24", + "172.16.44.1/24", + "10.0.42.1/24", + "10.0.43.1/24", + "192.168.42.1/24", + "192.168.43.1/24", + "192.168.44.1/24", + } + + bridgeIface string + bridgeNetwork *net.IPNet + + defaultBindingIP = net.ParseIP("0.0.0.0") + currentInterfaces = ifaces{c: make(map[string]*networkInterface)} +) + +func InitDriver(job *engine.Job) engine.Status { + var ( + network *net.IPNet + enableIPTables = job.GetenvBool("EnableIptables") + icc = job.GetenvBool("InterContainerCommunication") + ipMasq = job.GetenvBool("EnableIpMasq") + ipForward = job.GetenvBool("EnableIpForward") + bridgeIP = job.Getenv("BridgeIP") + fixedCIDR = job.Getenv("FixedCIDR") + ) + + if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { + defaultBindingIP = net.ParseIP(defaultIP) + } + + bridgeIface = job.Getenv("BridgeIface") + usingDefaultBridge := false + if bridgeIface == "" { + usingDefaultBridge = true + bridgeIface = DefaultNetworkBridge + } + + addr, err := networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + // If we're not using the default bridge, fail without trying to create it + if !usingDefaultBridge { + return job.Error(err) + } + // If the iface is not found, try to create it + if err := createBridge(bridgeIP); err != nil { + return job.Error(err) + } + + addr, err = networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + return job.Error(err) + } + network = addr.(*net.IPNet) + } else { + network = addr.(*net.IPNet) + // validate that the bridge ip matches the ip specified by BridgeIP + if bridgeIP != "" { + bip, _, err := net.ParseCIDR(bridgeIP) + if err != nil { + return job.Error(err) + } + if !network.IP.Equal(bip) { + return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip) + } + } + } + + // Configure iptables for link support + if enableIPTables { + if err := setupIPTables(addr, icc, ipMasq); err != nil { + return job.Error(err) + } + } + + if ipForward { + // Enable IPv4 forwarding + if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { + job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) + } + } + + // We can always try removing the iptables + if err := iptables.RemoveExistingChain("DOCKER"); err != nil { + return job.Error(err) + } + + if enableIPTables { + chain, err := iptables.NewChain("DOCKER", bridgeIface) + if err != nil { + return job.Error(err) + } + portmapper.SetIptablesChain(chain) + } + + bridgeNetwork = network + if fixedCIDR != "" { + _, subnet, err := net.ParseCIDR(fixedCIDR) + if err != nil { + return job.Error(err) + } + log.Debugf("Subnet: %v", subnet) + if err := ipallocator.RegisterSubnet(bridgeNetwork, subnet); err != nil { + return job.Error(err) + } + } + + // https://github.com/docker/docker/issues/2768 + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) + + for name, f := range map[string]engine.Handler{ + "allocate_interface": Allocate, + "release_interface": Release, + "allocate_port": AllocatePort, + "link": LinkContainers, + } { + if err := job.Eng.Register(name, f); err != nil { + return job.Error(err) + } + } + return engine.StatusOK +} + +func setupIPTables(addr net.Addr, icc, ipmasq bool) error { + // Enable NAT + + if ipmasq { + natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), 
"!", "-o", bridgeIface, "-j", "MASQUERADE"} + + if !iptables.Exists(natArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { + return fmt.Errorf("Unable to enable network bridge NAT: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables postrouting: %s", output) + } + } + } + + var ( + args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} + acceptArgs = append(args, "ACCEPT") + dropArgs = append(args, "DROP") + ) + + if !icc { + iptables.Raw(append([]string{"-D"}, acceptArgs...)...) + + if !iptables.Exists(dropArgs...) { + log.Debugf("Disable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { + return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error disabling intercontainer communication: %s", output) + } + } + } else { + iptables.Raw(append([]string{"-D"}, dropArgs...)...) + + if !iptables.Exists(acceptArgs...) { + log.Debugf("Enable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { + return fmt.Errorf("Unable to allow intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error enabling intercontainer communication: %s", output) + } + } + } + + // Accept all non-intercontainer outgoing packets + outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} + if !iptables.Exists(outgoingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow outgoing packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow outgoing: %s", output) + } + } + + // Accept incoming packets for existing connections + existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} + + if !iptables.Exists(existingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow incoming packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow incoming: %s", output) + } + } + return nil +} + +// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, +// and attempts to configure it with an address which doesn't conflict with any other interface on the host. +// If it can't find an address which doesn't conflict, it will return an error. +func createBridge(bridgeIP string) error { + nameservers := []string{} + resolvConf, _ := resolvconf.Get() + // we don't check for an error here, because we don't really care + // if we can't read /etc/resolv.conf. So instead we skip the append + // if resolvConf is nil. It either doesn't exist, or we can't read it + // for some reason. + if resolvConf != nil { + nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...) 
+	}
+
+	var ifaceAddr string
+	if len(bridgeIP) != 0 {
+		_, _, err := net.ParseCIDR(bridgeIP)
+		if err != nil {
+			return err
+		}
+		ifaceAddr = bridgeIP
+	} else {
+		for _, addr := range addrs {
+			_, dockerNetwork, err := net.ParseCIDR(addr)
+			if err != nil {
+				return err
+			}
+			if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil {
+				if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil {
+					ifaceAddr = addr
+					break
+				} else {
+					log.Debugf("%s %s", addr, err)
+				}
+			}
+		}
+	}
+
+	if ifaceAddr == "" {
+		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
+	}
+	log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
+
+	if err := createBridgeIface(bridgeIface); err != nil {
+		return err
+	}
+
+	iface, err := net.InterfaceByName(bridgeIface)
+	if err != nil {
+		return err
+	}
+
+	ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr)
+	if err != nil {
+		return err
+	}
+
+	if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
+		return fmt.Errorf("Unable to add private network: %s", err)
+	}
+	if err := netlink.NetworkLinkUp(iface); err != nil {
+		return fmt.Errorf("Unable to start network bridge: %s", err)
+	}
+	return nil
+}
+
+func createBridgeIface(name string) error {
+	kv, err := kernel.GetKernelVersion()
+	// only set the bridge's mac address if the kernel version is >= 3.3,
+	// as it was not supported before that
+	setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3)
+	log.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
+	return netlink.CreateBridge(name, setBridgeMacAddr)
+}
+
+// generateMacAddr generates an IEEE 802-compliant MAC address from the given IP address.
+//
+// The generator is guaranteed to be consistent: the same IP will always yield the same
+// MAC address. This is to avoid ARP cache issues.
+func generateMacAddr(ip net.IP) net.HardwareAddr {
+	hw := make(net.HardwareAddr, 6)
+
+	// The first byte of the MAC address has to comply with these rules:
+	// 1. Unicast: Set the least-significant bit to 0.
+	// 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1.
+	// 3. As "small" as possible: The veth address has to be "smaller" than the bridge address.
+	hw[0] = 0x02
+
+	// The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI).
+	// Since this address is locally administered, we can do whatever we want as long as
+	// it doesn't conflict with other addresses.
+	hw[1] = 0x42
+
+	// Insert the IP address into the last 32 bits of the MAC address.
+	// This is a simple way to guarantee the address will be consistent and unique.
+	copy(hw[2:], ip.To4())
+
+	return hw
+}
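As a quick illustration of generateMacAddr's scheme (standard library only; the sample address is arbitrary): 172.17.0.2 yields 02:42:ac:11:00:02, since 0xac11 is 172.17.

package main

import (
	"fmt"
	"net"
)

func main() {
	ip := net.ParseIP("172.17.0.2")

	hw := make(net.HardwareAddr, 6)
	hw[0] = 0x02           // locally administered, unicast
	hw[1] = 0x42           // arbitrary OUI byte, as in the driver
	copy(hw[2:], ip.To4()) // embed the IPv4 address in the low 32 bits

	fmt.Println(hw) // 02:42:ac:11:00:02
}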
+
+// Allocate allocates a network interface for the container
+func Allocate(job *engine.Job) engine.Status {
+	var (
+		ip          net.IP
+		mac         net.HardwareAddr
+		err         error
+		id          = job.Args[0]
+		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
+	)
+
+	if requestedIP != nil {
+		ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP)
+	} else {
+		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
+	}
+	if err != nil {
+		return job.Error(err)
+	}
+
+	// If no valid mac address was given, derive a deterministic one from the IP.
+	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
+		mac = generateMacAddr(ip)
+	}
+
+	out := engine.Env{}
+	out.Set("IP", ip.String())
+	out.Set("Mask", bridgeNetwork.Mask.String())
+	out.Set("Gateway", bridgeNetwork.IP.String())
+	out.Set("MacAddress", mac.String())
+	out.Set("Bridge", bridgeIface)
+
+	size, _ := bridgeNetwork.Mask.Size()
+	out.SetInt("IPPrefixLen", size)
+
+	currentInterfaces.Set(id, &networkInterface{
+		IP: ip,
+	})
+
+	out.WriteTo(job.Stdout)
+
+	return engine.StatusOK
+}
+
+// Release releases the network interface and IP allocated for the given container id
+func Release(job *engine.Job) engine.Status {
+	var (
+		id                 = job.Args[0]
+		containerInterface = currentInterfaces.Get(id)
+	)
+
+	if containerInterface == nil {
+		return job.Errorf("No network information to release for %s", id)
+	}
+
+	for _, nat := range containerInterface.PortMappings {
+		if err := portmapper.Unmap(nat); err != nil {
+			log.Infof("Unable to unmap port %s: %s", nat, err)
+		}
+	}
+
+	if err := ipallocator.ReleaseIP(bridgeNetwork, containerInterface.IP); err != nil {
+		log.Infof("Unable to release ip: %s", err)
+	}
+	return engine.StatusOK
+}
+
+// AllocatePort allocates an external port and maps it to the interface
+func AllocatePort(job *engine.Job) engine.Status {
+	var (
+		err error
+
+		ip            = defaultBindingIP
+		id            = job.Args[0]
+		hostIP        = job.Getenv("HostIP")
+		hostPort      = job.GetenvInt("HostPort")
+		containerPort = job.GetenvInt("ContainerPort")
+		proto         = job.Getenv("Proto")
+		network       = currentInterfaces.Get(id)
+	)
+
+	if hostIP != "" {
+		ip = net.ParseIP(hostIP)
+		if ip == nil {
+			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
+		}
+	}
+
+	// build the container-side address from the proto and container port
+	var container net.Addr
+	switch proto {
+	case "tcp":
+		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
+	case "udp":
+		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
+	default:
+		return job.Errorf("unsupported address type %s", proto)
+	}
+
+	//
+	// Try up to 10 times to get a port that's not already allocated.
+	//
+	// In the event of failure to bind, return the error that portmapper.Map
+	// yields.
+	//
+
+	var host net.Addr
+	for i := 0; i < MaxAllocatedPortAttempts; i++ {
+		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
+			break
+		}
+
+		if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
+			// There is no point in immediately retrying to map an explicitly
+			// chosen port.
+			if hostPort != 0 {
+				job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
+				break
+			}
+
+			// Automatically chosen 'free' port failed to bind: move on to the next.
+			job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
Trying another port.", allocerr.IPPort(), container.String()) + } else { + // some other error during mapping + job.Logf("Received an unexpected error during port allocation: %s", err.Error()) + break + } + } + + if err != nil { + return job.Error(err) + } + + network.PortMappings = append(network.PortMappings, host) + + out := engine.Env{} + switch netAddr := host.(type) { + case *net.TCPAddr: + out.Set("HostIP", netAddr.IP.String()) + out.SetInt("HostPort", netAddr.Port) + case *net.UDPAddr: + out.Set("HostIP", netAddr.IP.String()) + out.SetInt("HostPort", netAddr.Port) + } + if _, err := out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + + return engine.StatusOK +} + +func LinkContainers(job *engine.Job) engine.Status { + var ( + action = job.Args[0] + childIP = job.Getenv("ChildIP") + parentIP = job.Getenv("ParentIP") + ignoreErrors = job.GetenvBool("IgnoreErrors") + ports = job.GetenvList("Ports") + ) + split := func(p string) (string, string) { + parts := strings.Split(p, "/") + return parts[0], parts[1] + } + + for _, p := range ports { + port, proto := split(p) + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", parentIP, + "--dport", port, + "-d", childIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + return job.Error(err) + } else if len(output) != 0 { + return job.Errorf("Error toggle iptables forward: %s", output) + } + + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", childIP, + "--sport", port, + "-d", parentIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + return job.Error(err) + } else if len(output) != 0 { + return job.Errorf("Error toggle iptables forward: %s", output) + } + } + return engine.StatusOK +} diff --git a/daemon/networkdriver/bridge/driver_test.go b/daemon/networkdriver/bridge/driver_test.go new file mode 100644 index 00000000..1bda2f43 --- /dev/null +++ b/daemon/networkdriver/bridge/driver_test.go @@ -0,0 +1,120 @@ +package bridge + +import ( + "net" + "strconv" + "testing" + + "github.com/docker/docker/daemon/networkdriver/portmapper" + "github.com/docker/docker/engine" +) + +func init() { + // reset the new proxy command for mocking out the userland proxy in tests + portmapper.NewProxy = portmapper.NewMockProxyCommand +} + +func findFreePort(t *testing.T) int { + l, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal("Failed to find a free port") + } + defer l.Close() + + result, err := net.ResolveTCPAddr("tcp", l.Addr().String()) + if err != nil { + t.Fatal("Failed to resolve address to identify free port") + } + return result.Port +} + +func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) { + strPort := strconv.Itoa(port) + + job = eng.Job("allocate_port", "container_id") + job.Setenv("HostIP", "127.0.0.1") + job.Setenv("HostPort", strPort) + job.Setenv("Proto", "tcp") + job.Setenv("ContainerPort", strPort) + return +} + +func newPortAllocationJobWithInvalidHostIP(eng *engine.Engine, port int) (job *engine.Job) { + strPort := strconv.Itoa(port) + + job = eng.Job("allocate_port", "container_id") + job.Setenv("HostIP", "localhost") + job.Setenv("HostPort", strPort) + job.Setenv("Proto", "tcp") + job.Setenv("ContainerPort", strPort) + return +} + +func TestAllocatePortDetection(t *testing.T) { + eng := engine.New() + eng.Logging = false + + freePort := findFreePort(t) + + // Init driver + job := eng.Job("initdriver") + if res := InitDriver(job); res != engine.StatusOK { + 
t.Fatal("Failed to initialize network driver") + } + + // Allocate interface + job = eng.Job("allocate_interface", "container_id") + if res := Allocate(job); res != engine.StatusOK { + t.Fatal("Failed to allocate network interface") + } + + // Allocate same port twice, expect failure on second call + job = newPortAllocationJob(eng, freePort) + if res := AllocatePort(job); res != engine.StatusOK { + t.Fatal("Failed to find a free port to allocate") + } + if res := AllocatePort(job); res == engine.StatusOK { + t.Fatal("Duplicate port allocation granted by AllocatePort") + } +} + +func TestHostnameFormatChecking(t *testing.T) { + eng := engine.New() + eng.Logging = false + + freePort := findFreePort(t) + + // Init driver + job := eng.Job("initdriver") + if res := InitDriver(job); res != engine.StatusOK { + t.Fatal("Failed to initialize network driver") + } + + // Allocate interface + job = eng.Job("allocate_interface", "container_id") + if res := Allocate(job); res != engine.StatusOK { + t.Fatal("Failed to allocate network interface") + } + + // Allocate port with invalid HostIP, expect failure with Bad Request http status + job = newPortAllocationJobWithInvalidHostIP(eng, freePort) + if res := AllocatePort(job); res == engine.StatusOK { + t.Fatal("Failed to check invalid HostIP") + } +} + +func TestMacAddrGeneration(t *testing.T) { + ip := net.ParseIP("192.168.0.1") + mac := generateMacAddr(ip).String() + + // Should be consistent. + if generateMacAddr(ip).String() != mac { + t.Fatal("Inconsistent MAC address") + } + + // Should be unique. + ip2 := net.ParseIP("192.168.0.2") + if generateMacAddr(ip2).String() == mac { + t.Fatal("Non-unique MAC address") + } +} diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go new file mode 100644 index 00000000..a1aaabbd --- /dev/null +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -0,0 +1,150 @@ +package ipallocator + +import ( + "encoding/binary" + "errors" + "net" + "sync" + + "github.com/docker/docker/daemon/networkdriver" +) + +// allocatedMap is thread-unsafe set of allocated IP +type allocatedMap struct { + p map[uint32]struct{} + last uint32 + begin uint32 + end uint32 +} + +func newAllocatedMap(network *net.IPNet) *allocatedMap { + firstIP, lastIP := networkdriver.NetworkRange(network) + begin := ipToInt(firstIP) + 2 + end := ipToInt(lastIP) - 1 + return &allocatedMap{ + p: make(map[uint32]struct{}), + begin: begin, + end: end, + last: begin - 1, // so first allocated will be begin + } +} + +type networkSet map[string]*allocatedMap + +var ( + ErrNoAvailableIPs = errors.New("no available ip addresses on network") + ErrIPAlreadyAllocated = errors.New("ip already allocated") + ErrIPOutOfRange = errors.New("requested ip is out of range") + ErrNetworkAlreadyRegistered = errors.New("network already registered") + ErrBadSubnet = errors.New("network does not contain specified subnet") +) + +var ( + lock = sync.Mutex{} + allocatedIPs = networkSet{} +) + +// RegisterSubnet registers network in global allocator with bounds +// defined by subnet. 
+
+// RegisterSubnet registers a network in the global allocator with bounds
+// defined by the subnet. To restrict allocation to a sub-range, call this
+// method before the first RequestIP; otherwise the full network range is used.
+func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
+	lock.Lock()
+	defer lock.Unlock()
+	key := network.String()
+	if _, ok := allocatedIPs[key]; ok {
+		return ErrNetworkAlreadyRegistered
+	}
+	n := newAllocatedMap(network)
+	beginIP, endIP := networkdriver.NetworkRange(subnet)
+	begin, end := ipToInt(beginIP)+1, ipToInt(endIP)-1
+	if !(begin >= n.begin && end <= n.end && begin < end) {
+		return ErrBadSubnet
+	}
+	n.begin = begin
+	n.end = end
+	n.last = begin - 1
+	allocatedIPs[key] = n
+	return nil
+}
+
+// RequestIP requests an available ip from the given network. It
+// will return the next available ip if the ip provided is nil. If the
+// ip provided is not nil it will validate that the provided ip is available
+// for use or return an error
+func RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
+	lock.Lock()
+	defer lock.Unlock()
+	key := network.String()
+	allocated, ok := allocatedIPs[key]
+	if !ok {
+		allocated = newAllocatedMap(network)
+		allocatedIPs[key] = allocated
+	}
+
+	if ip == nil {
+		return allocated.getNextIP()
+	}
+	return allocated.checkIP(ip)
+}
+
+// ReleaseIP adds the provided ip back into the pool of
+// available ips to be returned for use.
+func ReleaseIP(network *net.IPNet, ip net.IP) error {
+	lock.Lock()
+	defer lock.Unlock()
+	if allocated, exists := allocatedIPs[network.String()]; exists {
+		pos := ipToInt(ip)
+		delete(allocated.p, pos)
+	}
+	return nil
+}
+
+func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) {
+	pos := ipToInt(ip)
+
+	// Verify that the IP address has not been already allocated.
+	if _, ok := allocated.p[pos]; ok {
+		return nil, ErrIPAlreadyAllocated
+	}
+
+	// Verify that the IP address is within our network range.
+	if pos < allocated.begin || pos > allocated.end {
+		return nil, ErrIPOutOfRange
+	}
+
+	// Register the IP.
+	allocated.p[pos] = struct{}{}
+	allocated.last = pos
+
+	return ip, nil
+}
+
+// getNextIP returns the next available ip in the network, scanning forward
+// from the position of the last allocation and wrapping around the range
+func (allocated *allocatedMap) getNextIP() (net.IP, error) {
+	for pos := allocated.last + 1; pos != allocated.last; pos++ {
+		if pos > allocated.end {
+			pos = allocated.begin
+		}
+		if _, ok := allocated.p[pos]; ok {
+			continue
+		}
+		allocated.p[pos] = struct{}{}
+		allocated.last = pos
+		return intToIP(pos), nil
+	}
+	return nil, ErrNoAvailableIPs
+}
+
+// ipToInt converts a 4-byte IP into a 32-bit integer
+func ipToInt(ip net.IP) uint32 {
+	return binary.BigEndian.Uint32(ip.To4())
+}
+
+// intToIP converts a 32-bit integer into a 4-byte IP address
+func intToIP(n uint32) net.IP {
+	b := make([]byte, 4)
+	binary.BigEndian.PutUint32(b, n)
+	ip := net.IP(b)
+	return ip
+}
diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go
new file mode 100644
index 00000000..056c13b6
--- /dev/null
+++ b/daemon/networkdriver/ipallocator/allocator_test.go
@@ -0,0 +1,434 @@
+package ipallocator
+
+import (
+	"fmt"
+	"net"
+	"testing"
+)
+
+func reset() {
+	allocatedIPs = networkSet{}
+}
+
+func TestRequestNewIps(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	var ip net.IP
+	var err error
+	for i := 2; i < 10; i++ {
+		ip, err = RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected {
+			t.Fatalf("Expected ip %s got %s", expected, ip.String())
+		}
+	}
+	value := intToIP(ipToInt(ip) + 1).String()
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+	ip, err = RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ip.String() != value {
+		t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
+	}
+}
+
+func TestReleaseIp(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	ip, err := RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestGetReleasedIp(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	ip, err := RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	value := ip.String()
+	if err := ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 252; i++ {
+		_, err = RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = ReleaseIP(network, ip)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	ip, err = RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ip.String() != value {
+		t.Fatalf("Expected to receive same ip %s got %s", value, ip.String())
+	}
+}
+
+func TestRequestSpecificIp(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 224},
+	}
+
+	ip := net.ParseIP("192.168.0.5")
+
+	// Request a "good" IP.
+	if _, err := RequestIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	// Request the same IP again.
+	if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated {
+		t.Fatalf("Got the same IP twice: %#v", err)
+	}
+
+	// Request an out of range IP.
+ if _, err := RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange { + t.Fatalf("Got an out of range IP: %#v", err) + } +} + +func TestConversion(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + i := ipToInt(ip) + if i == 0 { + t.Fatal("converted to zero") + } + conv := intToIP(i) + if !ip.Equal(conv) { + t.Error(conv.String()) + } +} + +func TestIPAllocator(t *testing.T) { + expectedIPs := []net.IP{ + 0: net.IPv4(127, 0, 0, 2), + 1: net.IPv4(127, 0, 0, 3), + 2: net.IPv4(127, 0, 0, 4), + 3: net.IPv4(127, 0, 0, 5), + 4: net.IPv4(127, 0, 0, 6), + } + + gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") + network := &net.IPNet{IP: gwIP, Mask: n.Mask} + // Pool after initialisation (f = free, u = used) + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that + // order. + for i := 0; i < 5; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, expectedIPs[i], ip) + } + // Before loop begin + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 0 + // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 1 + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 2 + // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) + // ↑ + + // After i = 3 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) + // ↑ + + // After i = 4 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // ↑ + + // Check that there are no more IPs + ip, err := RequestIP(network, nil) + if err == nil { + t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) + } + + // Release some IPs in non-sequential order + if err := ReleaseIP(network, expectedIPs[3]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, expectedIPs[2]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, expectedIPs[4]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // Make sure that IPs are reused in sequential order, starting + // with the first released IP + newIPs := make([]net.IP, 3) + for i := 0; i < 3; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + newIPs[i] = ip + } + assertIPEquals(t, expectedIPs[2], newIPs[0]) + assertIPEquals(t, expectedIPs[3], newIPs[1]) + assertIPEquals(t, expectedIPs[4], newIPs[2]) + + _, err = RequestIP(network, nil) + if err == nil { + t.Fatal("There shouldn't be any IP addresses at this point") + } +} + +func TestAllocateFirstIP(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 0}, + Mask: []byte{255, 255, 255, 0}, + } + + firstIP := network.IP.To4().Mask(network.Mask) + first := ipToInt(firstIP) + 1 + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + allocated := ipToInt(ip) + + if allocated == first { + t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) + } +} + +func TestAllocateAllIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + var ( + current, first net.IP + err error + isFirst = true + ) + + for err == nil { + current, err = RequestIP(network, nil) + if isFirst { + first = current + isFirst = false + } + } + + if err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if err := ReleaseIP(network, first); err != nil { + t.Fatal(err) + } + + again, err := RequestIP(network, nil) 
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, first, again)
+}
+
+func TestAllocateDifferentSubnets(t *testing.T) {
+	defer reset()
+	network1 := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	network2 := &net.IPNet{
+		IP:   []byte{127, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	expectedIPs := []net.IP{
+		0: net.IPv4(192, 168, 0, 2),
+		1: net.IPv4(192, 168, 0, 3),
+		2: net.IPv4(127, 0, 0, 2),
+		3: net.IPv4(127, 0, 0, 3),
+	}
+
+	ip11, err := RequestIP(network1, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip12, err := RequestIP(network1, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip21, err := RequestIP(network2, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip22, err := RequestIP(network2, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertIPEquals(t, expectedIPs[0], ip11)
+	assertIPEquals(t, expectedIPs[1], ip12)
+	assertIPEquals(t, expectedIPs[2], ip21)
+	assertIPEquals(t, expectedIPs[3], ip22)
+}
+
+func TestRegisterBadTwice(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 1, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	subnet := &net.IPNet{
+		IP:   []byte{192, 168, 1, 8},
+		Mask: []byte{255, 255, 255, 248},
+	}
+
+	if err := RegisterSubnet(network, subnet); err != nil {
+		t.Fatal(err)
+	}
+	subnet = &net.IPNet{
+		IP:   []byte{192, 168, 1, 16},
+		Mask: []byte{255, 255, 255, 248},
+	}
+	if err := RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
+		t.Fatalf("Expected ErrNetworkAlreadyRegistered error, got %v", err)
+	}
+}
+
+func TestRegisterBadRange(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 1, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	subnet := &net.IPNet{
+		IP:   []byte{192, 168, 1, 1},
+		Mask: []byte{255, 255, 0, 0},
+	}
+	if err := RegisterSubnet(network, subnet); err != ErrBadSubnet {
+		t.Fatalf("Expected ErrBadSubnet error, got %v", err)
+	}
+}
+
+func TestAllocateFromRange(t *testing.T) {
+	defer reset()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	// 192.168.0.9 - 192.168.0.14
+	subnet := &net.IPNet{
+		IP:   []byte{192, 168, 0, 8},
+		Mask: []byte{255, 255, 255, 248},
+	}
+	if err := RegisterSubnet(network, subnet); err != nil {
+		t.Fatal(err)
+	}
+	expectedIPs := []net.IP{
+		0: net.IPv4(192, 168, 0, 9),
+		1: net.IPv4(192, 168, 0, 10),
+		2: net.IPv4(192, 168, 0, 11),
+		3: net.IPv4(192, 168, 0, 12),
+		4: net.IPv4(192, 168, 0, 13),
+		5: net.IPv4(192, 168, 0, 14),
+	}
+	for _, ip := range expectedIPs {
+		rip, err := RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		assertIPEquals(t, ip, rip)
+	}
+
+	if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
+		t.Fatalf("Expected ErrNoAvailableIPs error, got %v", err)
+	}
+	for _, ip := range expectedIPs {
+		ReleaseIP(network, ip)
+		rip, err := RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		assertIPEquals(t, ip, rip)
+	}
+}
+
+func assertIPEquals(t *testing.T, ip1, ip2 net.IP) {
+	if !ip1.Equal(ip2) {
+		t.Fatalf("Expected IP %s, got %s", ip1, ip2)
+	}
+}
+
+func BenchmarkRequestIP(b *testing.B) {
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		for j := 0; j < 253; j++ {
+			_, err := RequestIP(network, nil)
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+		reset()
+	}
+}
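The tests above lean on NetworkRange from the networkdriver package. A stand-alone sketch of the same first/last computation using only the standard library (not the package's actual code):

package main

import (
	"fmt"
	"net"
)

func main() {
	_, ipnet, _ := net.ParseCIDR("192.168.0.1/24")

	// first address: the IP masked down to the network address
	first := ipnet.IP.Mask(ipnet.Mask).To4()

	// last address: set every host bit to 1
	last := make(net.IP, 4)
	copy(last, first)
	for i := range last {
		last[i] |= ^ipnet.Mask[i]
	}
	fmt.Println(first, last) // 192.168.0.0 192.168.0.255
}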
diff --git a/daemon/networkdriver/network.go b/daemon/networkdriver/network.go
new file mode 100644
index 00000000..8dda789d
--- /dev/null
+++ b/daemon/networkdriver/network.go
@@ -0,0 +1,10 @@
+package networkdriver
+
+import (
+	"errors"
+)
+
+var (
+	ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver")
+	ErrNetworkOverlaps                = errors.New("requested network overlaps with existing network")
+)
diff --git a/daemon/networkdriver/network_test.go b/daemon/networkdriver/network_test.go
new file mode 100644
index 00000000..d655cb30
--- /dev/null
+++ b/daemon/networkdriver/network_test.go
@@ -0,0 +1,190 @@
+package networkdriver
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/libcontainer/netlink"
+)
+
+func TestNonOverlappingNameservers(t *testing.T) {
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	nameservers := []string{
+		"127.0.0.1/32",
+	}
+
+	if err := CheckNameserverOverlaps(nameservers, network); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestOverlappingNameservers(t *testing.T) {
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	nameservers := []string{
+		"192.168.0.1/32",
+	}
+
+	if err := CheckNameserverOverlaps(nameservers, network); err == nil {
+		t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err)
+	}
+}
+
+func TestCheckRouteOverlaps(t *testing.T) {
+	orig := networkGetRoutesFct
+	defer func() {
+		networkGetRoutesFct = orig
+	}()
+	networkGetRoutesFct = func() ([]netlink.Route, error) {
+		routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"}
+
+		routes := []netlink.Route{}
+		for _, addr := range routesData {
+			_, netX, _ := net.ParseCIDR(addr)
+			routes = append(routes, netlink.Route{IPNet: netX})
+		}
+		return routes, nil
+	}
+
+	_, netX, _ := net.ParseCIDR("172.16.0.1/24")
+	if err := CheckRouteOverlaps(netX); err != nil {
+		t.Fatal(err)
+	}
+
+	_, netX, _ = net.ParseCIDR("10.0.2.0/24")
+	if err := CheckRouteOverlaps(netX); err == nil {
+		t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but they don't")
+	}
+}
+
+func TestCheckNameserverOverlaps(t *testing.T) {
+	nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"}
+
+	_, netX, _ := net.ParseCIDR("10.0.2.3/32")
+
+	if err := CheckNameserverOverlaps(nameservers, netX); err == nil {
+		t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX)
+	}
+
+	_, netX, _ = net.ParseCIDR("192.168.102.2/32")
+
+	if err := CheckNameserverOverlaps(nameservers, netX); err != nil {
+		t.Fatalf("%s should not overlap %v but it does", netX, nameservers)
+	}
+}
+
+func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) {
+	_, netX, _ := net.ParseCIDR(CIDRx)
+	_, netY, _ := net.ParseCIDR(CIDRy)
+	if !NetworkOverlaps(netX, netY) {
+		t.Errorf("%v and %v should overlap", netX, netY)
+	}
+}
+
+func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) {
+	_, netX, _ := net.ParseCIDR(CIDRx)
+	_, netY, _ := net.ParseCIDR(CIDRy)
+	if NetworkOverlaps(netX, netY) {
+		t.Errorf("%v and %v should not overlap", netX, netY)
+	}
+}
+
+func TestNetworkOverlaps(t *testing.T) {
+	//netY starts at same IP and ends within netX
+	AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t)
+	//netY starts within netX and ends at same IP
+	AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t)
+	//netY starts and ends within netX
+	AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t)
+	//netY starts at same IP and ends outside of netX
+	AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t)
+	//netY starts before and ends at same IP of netX
+	AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
+	//netY
starts before and ends outside of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) + //netY starts and ends before netX + AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) + //netX starts and ends before netY + AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) +} + +func TestNetworkRange(t *testing.T) { + // Simple class C test + _, network, _ := net.ParseCIDR("192.168.0.1/24") + first, last := NetworkRange(network) + if !first.Equal(net.ParseIP("192.168.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("192.168.0.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 256 { + t.Error(size) + } + + // Class A test + _, network, _ = net.ParseCIDR("10.0.0.1/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 16777216 { + t.Error(size) + } + + // Class A, random IP address + _, network, _ = net.ParseCIDR("10.1.2.3/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + + // 32bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/32") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.3")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 1 { + t.Error(size) + } + + // 31bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/31") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.2")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 2 { + t.Error(size) + } + + // 26bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/26") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.63")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 64 { + t.Error(size) + } +} diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go new file mode 100644 index 00000000..d4fcc6e7 --- /dev/null +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -0,0 +1,156 @@ +package portallocator + +import ( + "errors" + "fmt" + "net" + "sync" +) + +type portMap struct { + p map[int]struct{} + last int +} + +func newPortMap() *portMap { + return &portMap{ + p: map[int]struct{}{}, + } +} + +type protoMap map[string]*portMap + +func newProtoMap() protoMap { + return protoMap{ + "tcp": newPortMap(), + "udp": newPortMap(), + } +} + +type ipMapping map[string]protoMap + +const ( + BeginPortRange = 49153 + EndPortRange = 65535 +) + +var ( + ErrAllPortsAllocated = errors.New("all ports are allocated") + ErrUnknownProtocol = errors.New("unknown protocol") +) + +var ( + mutex sync.Mutex + + defaultIP = net.ParseIP("0.0.0.0") + globalMap = ipMapping{} +) + +type ErrPortAlreadyAllocated struct { + ip string + port int +} + +func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated { + return ErrPortAlreadyAllocated{ + ip: ip, + port: port, + } +} + +func (e ErrPortAlreadyAllocated) IP() string { + return e.ip +} + +func (e ErrPortAlreadyAllocated) Port() int { + return e.port +} + +func (e 
ErrPortAlreadyAllocated) IPPort() string {
+	return fmt.Sprintf("%s:%d", e.ip, e.port)
+}
+
+func (e ErrPortAlreadyAllocated) Error() string {
+	return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
+}
+
+// RequestPort requests a new port from the global ports pool for the specified ip and proto.
+// If port is 0 it returns the first free port. Otherwise it checks the port's availability
+// in the pool and returns that port, or an error if the port is already allocated.
+func RequestPort(ip net.IP, proto string, port int) (int, error) {
+	mutex.Lock()
+	defer mutex.Unlock()
+
+	if proto != "tcp" && proto != "udp" {
+		return 0, ErrUnknownProtocol
+	}
+
+	if ip == nil {
+		ip = defaultIP
+	}
+	ipstr := ip.String()
+	protomap, ok := globalMap[ipstr]
+	if !ok {
+		protomap = newProtoMap()
+		globalMap[ipstr] = protomap
+	}
+	mapping := protomap[proto]
+	if port > 0 {
+		if _, ok := mapping.p[port]; !ok {
+			mapping.p[port] = struct{}{}
+			return port, nil
+		}
+		return 0, NewErrPortAlreadyAllocated(ipstr, port)
+	}
+
+	port, err := mapping.findPort()
+	if err != nil {
+		return 0, err
+	}
+	return port, nil
+}
+
+// ReleasePort releases a port from the global ports pool for the specified ip and proto.
+func ReleasePort(ip net.IP, proto string, port int) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+
+	if ip == nil {
+		ip = defaultIP
+	}
+	protomap, ok := globalMap[ip.String()]
+	if !ok {
+		return nil
+	}
+	delete(protomap[proto].p, port)
+	return nil
+}
+
+// ReleaseAll releases all ports for all ips.
+func ReleaseAll() error {
+	mutex.Lock()
+	globalMap = ipMapping{}
+	mutex.Unlock()
+	return nil
+}
+
+func (pm *portMap) findPort() (int, error) {
+	if pm.last == 0 {
+		pm.p[BeginPortRange] = struct{}{}
+		pm.last = BeginPortRange
+		return BeginPortRange, nil
+	}
+
+	for port := pm.last + 1; port != pm.last; port++ {
+		if port > EndPortRange {
+			port = BeginPortRange
+		}
+
+		if _, ok := pm.p[port]; !ok {
+			pm.p[port] = struct{}{}
+			pm.last = port
+			return port, nil
+		}
+	}
+	return 0, ErrAllPortsAllocated
+}
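findPort's wrap-around scan is the core of the allocator. A toy version with a tiny range (the values are hypothetical, standard library only):

package main

import "fmt"

func main() {
	const begin, end = 49153, 49157
	used := map[int]bool{49153: true, 49154: true, 49157: true}
	last := 49157 // the most recently handed-out port

	// scan forward from just past the last allocation, wrapping at the end
	for port := last + 1; port != last; port++ {
		if port > end {
			port = begin
		}
		if !used[port] {
			fmt.Println("next free port:", port) // 49155
			break
		}
	}
}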
diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go
new file mode 100644
index 00000000..9869c332
--- /dev/null
+++ b/daemon/networkdriver/portallocator/portallocator_test.go
@@ -0,0 +1,216 @@
+package portallocator
+
+import (
+	"net"
+	"testing"
+)
+
+func reset() {
+	ReleaseAll()
+}
+
+func TestRequestNewPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := BeginPortRange; port != expected {
+		t.Fatalf("Expected port %d got %d", expected, port)
+	}
+}
+
+func TestRequestSpecificPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+}
+
+func TestReleasePort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	if err := ReleasePort(defaultIP, "tcp", 5000); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReuseReleasedPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	if err := ReleasePort(defaultIP, "tcp", 5000); err != nil {
+		t.Fatal(err)
+	}
+
+	port, err = RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestRequestAlreadyAllocatedPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	port, err = RequestPort(defaultIP, "tcp", 5000)
+
+	switch err.(type) {
+	case ErrPortAlreadyAllocated:
+	default:
+		t.Fatalf("Expected port allocation error got %s", err)
+	}
+}
+
+func TestUnknownProtocol(t *testing.T) {
+	defer reset()
+
+	if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
+		t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err)
+	}
+}
+
+func TestAllocateAllPorts(t *testing.T) {
+	defer reset()
+
+	for i := 0; i <= EndPortRange-BeginPortRange; i++ {
+		port, err := RequestPort(defaultIP, "tcp", 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := BeginPortRange + i; port != expected {
+			t.Fatalf("Expected port %d got %d", expected, port)
+		}
+	}
+
+	if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated {
+		t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err)
+	}
+
+	_, err := RequestPort(defaultIP, "udp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// release a port in the middle and ensure we get another tcp port
+	port := BeginPortRange + 5
+	if err := ReleasePort(defaultIP, "tcp", port); err != nil {
+		t.Fatal(err)
+	}
+	newPort, err := RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if newPort != port {
+		t.Fatalf("Expected port %d got %d", port, newPort)
+	}
+}
+
+func BenchmarkAllocatePorts(b *testing.B) {
+	defer reset()
+
+	for i := 0; i < b.N; i++ {
+		for i := 0; i <= EndPortRange-BeginPortRange; i++ {
+			port, err := RequestPort(defaultIP, "tcp", 0)
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			if expected := BeginPortRange + i; port != expected {
+				b.Fatalf("Expected port %d got %d", expected, port)
+			}
+		}
+		reset()
+	}
+}
+
+func TestPortAllocation(t *testing.T) {
+	defer reset()
+
+	ip := net.ParseIP("192.168.0.1")
+	ip2 := net.ParseIP("192.168.0.2")
+	if port, err := RequestPort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	} else if port != 80 {
+		t.Fatalf("Acquire(80) should return 80, not %d", port)
+	}
+	port, err := RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port <= 0 {
+		t.Fatalf("Acquire(0) should return a non-zero port")
+	}
+
+	if _, err := RequestPort(ip, "tcp", port); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+
+	if newPort, err := RequestPort(ip, "tcp", 0); err != nil {
+		t.Fatal(err)
+	} else if newPort == port {
+		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
+	}
+
+	if _, err := RequestPort(ip, "tcp", 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if _, err := RequestPort(ip2, "tcp", 80); err != nil {
+		t.Fatalf("It should be possible to allocate the same port on a different interface")
+	}
+	if _, err := RequestPort(ip2, "tcp", 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if err := ReleasePort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := RequestPort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	}
+
+	port, err = RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	port2, err := RequestPort(ip, "tcp", port+1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	port3, err := RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port3 == port2 {
+		t.Fatal("Requesting a dynamic port should never allocate a used port")
+	}
+}
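The portmapper that follows keys its table of active mappings by host address. A stand-alone sketch of the "ip:port/proto" key format its getKey helper produces (standard library only; key here mirrors, but is not, the package's function):

package main

import (
	"fmt"
	"net"
)

// key formats a host address the same way the portmapper keys its table.
func key(a net.Addr) string {
	switch t := a.(type) {
	case *net.TCPAddr:
		return fmt.Sprintf("%s:%d/tcp", t.IP, t.Port)
	case *net.UDPAddr:
		return fmt.Sprintf("%s:%d/udp", t.IP, t.Port)
	}
	return ""
}

func main() {
	fmt.Println(key(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080})) // 127.0.0.1:8080/tcp
}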
used port") + } +} diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go new file mode 100644 index 00000000..24ca0d89 --- /dev/null +++ b/daemon/networkdriver/portmapper/mapper.go @@ -0,0 +1,176 @@ +package portmapper + +import ( + "errors" + "fmt" + "net" + "sync" + + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/pkg/iptables" + "github.com/docker/docker/pkg/log" +) + +type mapping struct { + proto string + userlandProxy UserlandProxy + host net.Addr + container net.Addr +} + +var ( + chain *iptables.Chain + lock sync.Mutex + + // udp:ip:port + currentMappings = make(map[string]*mapping) + + NewProxy = NewProxyCommand +) + +var ( + ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") + ErrPortMappedForIP = errors.New("port is already mapped to ip") + ErrPortNotMapped = errors.New("port is not mapped") +) + +func SetIptablesChain(c *iptables.Chain) { + chain = c +} + +func Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) { + lock.Lock() + defer lock.Unlock() + + var ( + m *mapping + proto string + allocatedHostPort int + proxy UserlandProxy + ) + + switch container.(type) { + case *net.TCPAddr: + proto = "tcp" + if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &net.TCPAddr{IP: hostIP, Port: allocatedHostPort}, + container: container, + } + + proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port) + case *net.UDPAddr: + proto = "udp" + if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &net.UDPAddr{IP: hostIP, Port: allocatedHostPort}, + container: container, + } + + proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port) + default: + return nil, ErrUnknownBackendAddressType + } + + // release the allocated port on any further error during return. 
+ defer func() { + if err != nil { + portallocator.ReleasePort(hostIP, proto, allocatedHostPort) + } + }() + + key := getKey(m.host) + if _, exists := currentMappings[key]; exists { + return nil, ErrPortMappedForIP + } + + containerIP, containerPort := getIPAndPort(m.container) + if err := forward(iptables.Add, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil { + return nil, err + } + + cleanup := func() error { + // need to undo the iptables rules before we return + proxy.Stop() + forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort) + if err := portallocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil { + return err + } + + return nil + } + + if err := proxy.Start(); err != nil { + if err := cleanup(); err != nil { + return nil, fmt.Errorf("Error during port allocation cleanup: %v", err) + } + return nil, err + } + m.userlandProxy = proxy + currentMappings[key] = m + return m.host, nil +} + +func Unmap(host net.Addr) error { + lock.Lock() + defer lock.Unlock() + + key := getKey(host) + data, exists := currentMappings[key] + if !exists { + return ErrPortNotMapped + } + + data.userlandProxy.Stop() + + delete(currentMappings, key) + + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + log.Errorf("Error on iptables delete: %s", err) + } + + switch a := host.(type) { + case *net.TCPAddr: + return portallocator.ReleasePort(a.IP, "tcp", a.Port) + case *net.UDPAddr: + return portallocator.ReleasePort(a.IP, "udp", a.Port) + } + return nil +} + +func getKey(a net.Addr) string { + switch t := a.(type) { + case *net.TCPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") + case *net.UDPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") + } + return "" +} + +func getIPAndPort(a net.Addr) (net.IP, int) { + switch t := a.(type) { + case *net.TCPAddr: + return t.IP, t.Port + case *net.UDPAddr: + return t.IP, t.Port + } + return nil, 0 +} + +func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { + if chain == nil { + return nil + } + return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) +} diff --git a/daemon/networkdriver/portmapper/mapper_test.go b/daemon/networkdriver/portmapper/mapper_test.go new file mode 100644 index 00000000..42e44a11 --- /dev/null +++ b/daemon/networkdriver/portmapper/mapper_test.go @@ -0,0 +1,152 @@ +package portmapper + +import ( + "net" + "testing" + + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/pkg/iptables" +) + +func init() { + // override this func to mock out the proxy server + NewProxy = NewMockProxyCommand +} + +func reset() { + chain = nil + currentMappings = make(map[string]*mapping) +} + +func TestSetIptablesChain(t *testing.T) { + defer reset() + + c := &iptables.Chain{ + Name: "TEST", + Bridge: "192.168.1.1", + } + + if chain != nil { + t.Fatal("chain should be nil at init") + } + + SetIptablesChain(c) + if chain == nil { + t.Fatal("chain should not be nil after set") + } +} + +func TestMapPorts(t *testing.T) { + dstIp1 := net.ParseIP("192.168.0.1") + dstIp2 := net.ParseIP("192.168.0.2") + dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} + dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} + + srcAddr1 := 
&net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} + + addrEqual := func(addr1, addr2 net.Addr) bool { + return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String()) + } + + if host, err := Map(srcAddr1, dstIp1, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } else if !addrEqual(dstAddr1, host) { + t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s", + dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network()) + } + + if _, err := Map(srcAddr1, dstIp1, 80); err == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if _, err := Map(srcAddr2, dstIp1, 80); err == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if _, err := Map(srcAddr2, dstIp2, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Unmap(dstAddr1) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) == nil { + t.Fatalf("Port already released, but no error reported") + } +} + +func TestGetUDPKey(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + key := getKey(addr) + + if expected := "192.168.1.5:53/udp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetTCPKey(t *testing.T) { + addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} + + key := getKey(addr) + + if expected := "192.168.1.5:80/tcp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetUDPIPAndPort(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + ip, port := getIPAndPort(addr) + if expected := "192.168.1.5"; ip.String() != expected { + t.Fatalf("expected ip %s got %s", expected, ip) + } + + if ep := 53; port != ep { + t.Fatalf("expected port %d got %d", ep, port) + } +} + +func TestMapAllPortsSingleInterface(t *testing.T) { + dstIp1 := net.ParseIP("0.0.0.0") + srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + + hosts := []net.Addr{} + var host net.Addr + var err error + + defer func() { + for _, val := range hosts { + Unmap(val) + } + }() + + for i := 0; i < 10; i++ { + for i := portallocator.BeginPortRange; i < portallocator.EndPortRange; i++ { + if host, err = Map(srcAddr1, dstIp1, 0); err != nil { + t.Fatal(err) + } + + hosts = append(hosts, host) + } + + if _, err := Map(srcAddr1, dstIp1, portallocator.BeginPortRange); err == nil { + t.Fatalf("Port %d should be bound but is not", portallocator.BeginPortRange) + } + + for _, val := range hosts { + if err := Unmap(val); err != nil { + t.Fatal(err) + } + } + + hosts = []net.Addr{} + } +} diff --git a/daemon/networkdriver/portmapper/mock_proxy.go b/daemon/networkdriver/portmapper/mock_proxy.go new file mode 100644 index 00000000..253ce831 --- /dev/null +++ b/daemon/networkdriver/portmapper/mock_proxy.go @@ -0,0 +1,18 @@ +package portmapper + +import "net" + +func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { + return &mockProxyCommand{} +} + +type mockProxyCommand struct { +} + +func (p *mockProxyCommand) Start() error { + return nil +} + +func (p *mockProxyCommand) Stop() error { + return nil +} diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go new file mode 100644 index 00000000..af20469e --- /dev/null +++ 
b/daemon/networkdriver/portmapper/proxy.go @@ -0,0 +1,156 @@ +package portmapper + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "os/signal" + "strconv" + "syscall" + "time" + + "github.com/docker/docker/pkg/proxy" + "github.com/docker/docker/pkg/reexec" +) + +const userlandProxyCommandName = "docker-proxy" + +func init() { + reexec.Register(userlandProxyCommandName, execProxy) +} + +type UserlandProxy interface { + Start() error + Stop() error +} + +// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP +// proxies as separate processes. +type proxyCommand struct { + cmd *exec.Cmd +} + +// execProxy is the reexec function that is registered to start the userland proxies +func execProxy() { + f := os.NewFile(3, "signal-parent") + host, container := parseHostContainerAddrs() + + p, err := proxy.NewProxy(host, container) + if err != nil { + fmt.Fprintf(f, "1\n%s", err) + f.Close() + os.Exit(1) + } + go handleStopSignals(p) + fmt.Fprint(f, "0\n") + f.Close() + + // Run will block until the proxy stops + p.Run() +} + +// parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP +// net.Addrs to map the host and container ports +func parseHostContainerAddrs() (host net.Addr, container net.Addr) { + var ( + proto = flag.String("proto", "tcp", "proxy protocol") + hostIP = flag.String("host-ip", "", "host ip") + hostPort = flag.Int("host-port", -1, "host port") + containerIP = flag.String("container-ip", "", "container ip") + containerPort = flag.Int("container-port", -1, "container port") + ) + + flag.Parse() + + switch *proto { + case "tcp": + host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} + container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} + case "udp": + host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} + container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} + default: + log.Fatalf("unsupported protocol %s", *proto) + } + + return host, container +} + +func handleStopSignals(p proxy.Proxy) { + s := make(chan os.Signal, 10) + // note: SIGSTOP cannot actually be caught; it is listed here but the kernel stops the process directly + signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP) + + for _ = range s { + p.Close() + + os.Exit(0) + } +} + +func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { + args := []string{ + userlandProxyCommandName, + "-proto", proto, + "-host-ip", hostIP.String(), + "-host-port", strconv.Itoa(hostPort), + "-container-ip", containerIP.String(), + "-container-port", strconv.Itoa(containerPort), + } + + return &proxyCommand{ + cmd: &exec.Cmd{ + Path: reexec.Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies + }, + }, + } +} + +func (p *proxyCommand) Start() error { + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("proxy unable to open os.Pipe: %s", err) + } + defer r.Close() + p.cmd.ExtraFiles = []*os.File{w} + if err := p.cmd.Start(); err != nil { + return err + } + w.Close() + + errchan := make(chan error, 1) + go func() { + buf := make([]byte, 2) + r.Read(buf) + + if string(buf) != "0\n" { + errStr, _ := ioutil.ReadAll(r) + errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr) + return + } + errchan <- nil + }() + + select { + case err := <-errchan: + return err + case <-time.After(1 * time.Second): + return fmt.Errorf("Timed out starting the userland proxy") + } +} + +func (p *proxyCommand) Stop() error { + if p.cmd.Process 
!= nil { + if err := p.cmd.Process.Signal(os.Interrupt); err != nil { + return err + } + return p.cmd.Wait() + } + return nil +} diff --git a/daemon/networkdriver/utils.go b/daemon/networkdriver/utils.go new file mode 100644 index 00000000..410d6010 --- /dev/null +++ b/daemon/networkdriver/utils.go @@ -0,0 +1,118 @@ +package networkdriver + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/docker/libcontainer/netlink" +) + +var ( + networkGetRoutesFct = netlink.NetworkGetRoutes + ErrNoDefaultRoute = errors.New("no default route") +) + +func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { + if len(nameservers) > 0 { + for _, ns := range nameservers { + _, nsNetwork, err := net.ParseCIDR(ns) + if err != nil { + return err + } + if NetworkOverlaps(toCheck, nsNetwork) { + return ErrNetworkOverlapsWithNameservers + } + } + } + return nil +} + +func CheckRouteOverlaps(toCheck *net.IPNet) error { + networks, err := networkGetRoutesFct() + if err != nil { + return err + } + + for _, network := range networks { + if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { + return ErrNetworkOverlaps + } + } + return nil +} + +// Detects overlap between one IPNet and another +func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { + if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { + return true + } + if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { + return true + } + return false +} + +// Calculates the first and last IP addresses in an IPNet +func NetworkRange(network *net.IPNet) (net.IP, net.IP) { + var ( + netIP = network.IP.To4() + firstIP = netIP.Mask(network.Mask) + lastIP = net.IPv4(0, 0, 0, 0).To4() + ) + + for i := 0; i < len(lastIP); i++ { + lastIP[i] = netIP[i] | ^network.Mask[i] + } + return firstIP, lastIP +} + +// Given a netmask, calculates the number of available hosts +func NetworkSize(mask net.IPMask) int32 { + m := net.IPv4Mask(0, 0, 0, 0) + for i := 0; i < net.IPv4len; i++ { + m[i] = ^mask[i] + } + return int32(binary.BigEndian.Uint32(m)) + 1 +} + +// Return the IPv4 address of a network interface +func GetIfaceAddr(name string) (net.Addr, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return nil, err + } + addrs, err := iface.Addrs() + if err != nil { + return nil, err + } + var addrs4 []net.Addr + for _, addr := range addrs { + ip := (addr.(*net.IPNet)).IP + if ip4 := ip.To4(); len(ip4) == net.IPv4len { + addrs4 = append(addrs4, addr) + } + } + switch { + case len(addrs4) == 0: + return nil, fmt.Errorf("Interface %v has no IP addresses", name) + case len(addrs4) > 1: + fmt.Printf("Interface %v has more than 1 IPv4 address. 
Defaulting to using %v\n", + name, (addrs4[0].(*net.IPNet)).IP) + } + return addrs4[0], nil +} + +func GetDefaultRouteIface() (*net.Interface, error) { + rs, err := networkGetRoutesFct() + if err != nil { + return nil, fmt.Errorf("unable to get routes: %v", err) + } + for _, r := range rs { + if r.Default { + return r.Iface, nil + } + } + return nil, ErrNoDefaultRoute +} diff --git a/daemon/pause.go b/daemon/pause.go new file mode 100644 index 00000000..0e4323d9 --- /dev/null +++ b/daemon/pause.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Pause(); err != nil { + return job.Errorf("Cannot pause container %s: %s", name, err) + } + container.LogEvent("pause") + return engine.StatusOK +} + +func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Unpause(); err != nil { + return job.Errorf("Cannot unpause container %s: %s", name, err) + } + container.LogEvent("unpause") + return engine.StatusOK +} diff --git a/daemon/resize.go b/daemon/resize.go new file mode 100644 index 00000000..68c07037 --- /dev/null +++ b/daemon/resize.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "strconv" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) + } + name := job.Args[0] + height, err := strconv.Atoi(job.Args[1]) + if err != nil { + return job.Error(err) + } + width, err := strconv.Atoi(job.Args[2]) + if err != nil { + return job.Error(err) + } + + if container := daemon.Get(name); container != nil { + if err := container.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} + +func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Not enough arguments. 
Usage: %s EXEC HEIGHT WIDTH\n", job.Name) + } + name := job.Args[0] + height, err := strconv.Atoi(job.Args[1]) + if err != nil { + return job.Error(err) + } + width, err := strconv.Atoi(job.Args[2]) + if err != nil { + return job.Error(err) + } + execConfig, err := daemon.getExecConfig(name) + if err != nil { + return job.Error(err) + } + if err := execConfig.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/daemon/restart.go b/daemon/restart.go new file mode 100644 index 00000000..bcc05715 --- /dev/null +++ b/daemon/restart.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := daemon.Get(name); container != nil { + if err := container.Restart(int(t)); err != nil { + return job.Errorf("Cannot restart container %s: %s\n", name, err) + } + container.LogEvent("restart") + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} diff --git a/daemon/start.go b/daemon/start.go new file mode 100644 index 00000000..f72407e3 --- /dev/null +++ b/daemon/start.go @@ -0,0 +1,73 @@ +package daemon + +import ( + "fmt" + "os" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { + if len(job.Args) < 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + var ( + name = job.Args[0] + container = daemon.Get(name) + ) + + if container == nil { + return job.Errorf("No such container: %s", name) + } + + if container.IsRunning() { + return job.Errorf("Container already started") + } + + // If no environment was set, then no hostconfig was passed. + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. + if len(job.Environ()) > 0 { + hostConfig := runconfig.ContainerHostConfigFromJob(job) + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return job.Error(err) + } + } + if err := container.Start(); err != nil { + container.LogEvent("die") + return job.Errorf("Cannot start container %s: %s", name, err) + } + + return engine.StatusOK +} + +func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + if err := parseSecurityOpt(container, hostConfig); err != nil { + return err + } + // Validate the HostConfig binds. 
Make sure that: + // the source exists + for _, bind := range hostConfig.Binds { + splitBind := strings.Split(bind, ":") + source := splitBind[0] + + // ensure the source exists on the host + _, err := os.Stat(source) + if err != nil && os.IsNotExist(err) { + err = os.MkdirAll(source, 0755) + if err != nil { + return fmt.Errorf("Could not create local directory '%s' for bind mount: %s", source, err.Error()) + } + } + } + // Register any links from the host config before starting the container + if err := daemon.RegisterLinks(container, hostConfig); err != nil { + return err + } + container.SetHostConfig(hostConfig) + container.ToDisk() + + return nil +} diff --git a/daemon/state.go b/daemon/state.go new file mode 100644 index 00000000..b7dc1499 --- /dev/null +++ b/daemon/state.go @@ -0,0 +1,206 @@ +package daemon + +import ( + "fmt" + "sync" + "time" + + "github.com/docker/docker/pkg/units" +) + +type State struct { + sync.Mutex + Running bool + Paused bool + Restarting bool + Pid int + ExitCode int + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} +} + +func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// StateString returns a single string describing the state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + return "exited" +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitRunning waits until the state is running. If the state is already running it +// returns immediately. To wait forever, supply a negative timeout. +// Returns the pid that was passed to SetRunning +func (s *State) WaitRunning(timeout time.Duration) (int, error) { + s.Lock() + if s.Running { + pid := s.Pid + s.Unlock() + return pid, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.GetPid(), nil +} + +// WaitStop waits until the state is stopped. If the state is already stopped it +// returns immediately. To wait forever, supply a negative timeout. 
+// Returns the exit code that was passed to SetStopped +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCode + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.GetExitCode(), nil +} + +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +func (s *State) GetPid() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +func (s *State) GetExitCode() int { + s.Lock() + res := s.ExitCode + s.Unlock() + return res +} + +func (s *State) SetRunning(pid int) { + s.Lock() + s.setRunning(pid) + s.Unlock() +} + +func (s *State) setRunning(pid int) { + s.Running = true + s.Paused = false + s.Restarting = false + s.ExitCode = 0 + s.Pid = pid + s.StartedAt = time.Now().UTC() + close(s.waitChan) // fire waiters for start + s.waitChan = make(chan struct{}) +} + +func (s *State) SetStopped(exitCode int) { + s.Lock() + s.setStopped(exitCode) + s.Unlock() +} + +func (s *State) setStopped(exitCode int) { + s.Running = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestarting is called when docker handles the automatic restart of containers that +// are in the middle of a stop and about to be restarted again +func (s *State) SetRestarting(exitCode int) { + s.Lock() + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) + s.Unlock() +} + +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +func (s *State) SetPaused() { + s.Lock() + s.Paused = true + s.Unlock() +} + +func (s *State) SetUnpaused() { + s.Lock() + s.Paused = false + s.Unlock() +} + +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} diff --git a/daemon/state_test.go b/daemon/state_test.go new file mode 100644 index 00000000..35524356 --- /dev/null +++ b/daemon/state_test.go @@ -0,0 +1,102 @@ +package daemon + +import ( + "sync/atomic" + "testing" + "time" +) + +func TestStateRunStop(t *testing.T) { + s := NewState() + for i := 1; i < 3; i++ { // full lifecycle two times + started := make(chan struct{}) + var pid int64 + go func() { + runPid, _ := s.WaitRunning(-1 * time.Second) + atomic.StoreInt64(&pid, int64(runPid)) + close(started) + }() + s.SetRunning(i + 100) + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i+100 { + t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + } + if s.ExitCode != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-started: + t.Log("Start callback fired") + } + runPid := int(atomic.LoadInt64(&pid)) + if runPid != i+100 { + t.Fatalf("Pid %v, expected %v", runPid, i+100) + } + if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { + t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) + } + + stopped := make(chan struct{}) + var exit int64 + go func() { + exitCode, _ := s.WaitStop(-1 * time.Second) + 
atomic.StoreInt64(&exit, int64(exitCode)) + close(stopped) + }() + s.SetStopped(i) + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + exitCode := int(atomic.LoadInt64(&exit)) + if exitCode != i { + t.Fatalf("ExitCode %v, expected %v", exitCode, i) + } + if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + } + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + started := make(chan struct{}) + go func() { + s.WaitRunning(100 * time.Millisecond) + close(started) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-started: + t.Log("Start callback fired") + } + s.SetRunning(42) + stopped := make(chan struct{}) + go func() { + s.WaitRunning(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Start callback fired") + } + +} diff --git a/daemon/stop.go b/daemon/stop.go new file mode 100644 index 00000000..1a098a1a --- /dev/null +++ b/daemon/stop.go @@ -0,0 +1,30 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := daemon.Get(name); container != nil { + if !container.IsRunning() { + return job.Errorf("Container already stopped") + } + if err := container.Stop(int(t)); err != nil { + return job.Errorf("Cannot stop container %s: %s\n", name, err) + } + container.LogEvent("stop") + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} diff --git a/daemon/top.go b/daemon/top.go new file mode 100644 index 00000000..4d916ee5 --- /dev/null +++ b/daemon/top.go @@ -0,0 +1,79 @@ +package daemon + +import ( + "os/exec" + "strconv" + "strings" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status { + if len(job.Args) != 1 && len(job.Args) != 2 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER [PS_ARGS]\n", job.Name) + } + var ( + name = job.Args[0] + psArgs = "-ef" + ) + + if len(job.Args) == 2 && job.Args[1] != "" { + psArgs = job.Args[1] + } + + if container := daemon.Get(name); container != nil { + if !container.IsRunning() { + return job.Errorf("Container %s is not running", name) + } + pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) + if err != nil { + return job.Error(err) + } + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return job.Errorf("Error running ps: %s", err) + } + + lines := strings.Split(string(output), "\n") + header := strings.Fields(lines[0]) + out := &engine.Env{} + out.SetList("Titles", header) + + pidIndex := -1 + for i, name := range header { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return job.Errorf("Couldn't find PID field in ps output") + } + + processes := [][]string{} + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(header)-1] + process = append(process, strings.Join(fields[len(header)-1:], " ")) + processes = append(processes, process) + } + } + } + out.SetJson("Processes", processes) + out.WriteTo(job.Stdout) + return engine.StatusOK + + } + return job.Errorf("No such container: %s", name) +} diff --git a/daemon/utils.go b/daemon/utils.go new file mode 100644 index 00000000..9c43236e --- /dev/null +++ b/daemon/utils.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/runconfig" +) + +func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { + if config.PortSpecs != nil { + ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) + if err != nil { + return err + } + config.PortSpecs = nil + if len(bindings) > 0 { + if hostConfig == nil { + hostConfig = &runconfig.HostConfig{} + } + hostConfig.PortBindings = bindings + } + + if config.ExposedPorts == nil { + config.ExposedPorts = make(nat.PortSet, len(ports)) + } + for k, v := range ports { + config.ExposedPorts[k] = v + } + } + return nil +} + +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string { + if hostConfig == nil { + return nil + } + + out := []string{} + + // merge in the lxc conf options into the generic config map + if lxcConf := hostConfig.LxcConf; lxcConf != nil { + for _, pair := range lxcConf { + // because lxc conf gets the driver name lxc.XXXX we need to trim it off + // and let the lxc driver add it back later if needed + parts := strings.SplitN(pair.Key, ".", 2) + out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) + } + } + + return out +} diff --git a/daemon/utils_linux.go b/daemon/utils_linux.go new file mode 100644 index 00000000..fb35152f --- /dev/null +++ b/daemon/utils_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/docker/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} diff --git a/daemon/utils_nolinux.go b/daemon/utils_nolinux.go new file 
mode 100644 index 00000000..25a56ad1 --- /dev/null +++ b/daemon/utils_nolinux.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/daemon/utils_test.go b/daemon/utils_test.go new file mode 100644 index 00000000..7748b860 --- /dev/null +++ b/daemon/utils_test.go @@ -0,0 +1,54 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +func TestMergeLxcConfig(t *testing.T) { + hostConfig := &runconfig.HostConfig{ + LxcConf: []utils.KeyValuePair{ + {Key: "lxc.cgroups.cpuset", Value: "1,2"}, + }, + } + + out := mergeLxcConfIntoOptions(hostConfig) + + cpuset := out[0] + if expected := "cgroups.cpuset=1,2"; cpuset != expected { + t.Fatalf("expected %s got %s", expected, cpuset) + } +} + +func TestRemoveLocalDns(t *testing.T) { + ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n" + + if result := utils.RemoveLocalDns([]byte(ns0)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n" + if result := utils.RemoveLocalDns([]byte(ns1)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n" + if result := utils.RemoveLocalDns([]byte(ns1)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n" + if result := utils.RemoveLocalDns([]byte(ns1)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } +} diff --git a/daemon/volumes.go b/daemon/volumes.go new file mode 100644 index 00000000..f98baa1c --- /dev/null +++ b/daemon/volumes.go @@ -0,0 +1,333 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volumes" +) + +type Mount struct { + MountToPath string + container *Container + volume *volumes.Volume + Writable bool + copyData bool +} + +func (container *Container) prepareVolumes() error { + if container.Volumes == nil || len(container.Volumes) == 0 { + container.Volumes = make(map[string]string) + container.VolumesRW = make(map[string]bool) + if err := container.applyVolumesFrom(); err != nil { + return err + } + } + + return container.createVolumes() +} + +// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order +func (container *Container) sortedVolumeMounts() []string { + var mountPaths []string + for path := range container.Volumes { + mountPaths = append(mountPaths, path) + } + + sort.Strings(mountPaths) + return mountPaths +} + +func (container *Container) createVolumes() error { + mounts, err := container.parseVolumeMountConfig() + if err != nil { + return err + } + + for _, mnt := range mounts { + if err := mnt.initialize(); err != nil { + return err + } + } + + return nil +} + +func (m *Mount) initialize() error { + // 
No need to initialize anything since it's already been initialized + if _, exists := m.container.Volumes[m.MountToPath]; exists { + return nil + } + + // This is the full path to container fs + mntToPath + containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs) + if err != nil { + return err + } + m.container.VolumesRW[m.MountToPath] = m.Writable + m.container.Volumes[m.MountToPath] = m.volume.Path + m.volume.AddContainer(m.container.ID) + if m.Writable && m.copyData { + // Copy whatever is in the container at the mntToPath to the volume + copyExistingContents(containerMntPath, m.volume.Path) + } + + return nil +} + +func (container *Container) VolumePaths() map[string]struct{} { + var paths = make(map[string]struct{}) + for _, path := range container.Volumes { + paths[path] = struct{}{} + } + return paths +} + +func (container *Container) registerVolumes() { + for _, mnt := range container.VolumeMounts() { + mnt.volume.AddContainer(container.ID) + } +} + +func (container *Container) derefVolumes() { + for path := range container.VolumePaths() { + vol := container.daemon.volumes.Get(path) + if vol == nil { + log.Debugf("Volume %s was not found and could not be dereferenced", path) + continue + } + vol.RemoveContainer(container.ID) + } +} + +func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) { + var mounts = make(map[string]*Mount) + // Get all the bind mounts + for _, spec := range container.hostConfig.Binds { + path, mountToPath, writable, err := parseBindMountSpec(spec) + if err != nil { + return nil, err + } + // Check if a volume already exists for this and use it + vol, err := container.daemon.volumes.FindOrCreateVolume(path, writable) + if err != nil { + return nil, err + } + mounts[mountToPath] = &Mount{ + container: container, + volume: vol, + MountToPath: mountToPath, + Writable: writable, + } + } + + // Get the rest of the volumes + for path := range container.Config.Volumes { + // Check if this is already added as a bind-mount + path = filepath.Clean(path) + if _, exists := mounts[path]; exists { + continue + } + + // Check if this has already been created + if _, exists := container.Volumes[path]; exists { + continue + } + + vol, err := container.daemon.volumes.FindOrCreateVolume("", true) + if err != nil { + return nil, err + } + mounts[path] = &Mount{ + container: container, + MountToPath: path, + volume: vol, + Writable: true, + copyData: true, + } + } + + return mounts, nil +} + +func parseBindMountSpec(spec string) (string, string, bool, error) { + var ( + path, mountToPath string + writable bool + arr = strings.Split(spec, ":") + ) + + switch len(arr) { + case 2: + path = arr[0] + mountToPath = arr[1] + writable = true + case 3: + path = arr[0] + mountToPath = arr[1] + writable = validMountMode(arr[2]) && arr[2] == "rw" + default: + return "", "", false, fmt.Errorf("Invalid volume specification: %s", spec) + } + + if !filepath.IsAbs(path) { + return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path) + } + + path = filepath.Clean(path) + mountToPath = filepath.Clean(mountToPath) + return path, mountToPath, writable, nil +} + +func (container *Container) applyVolumesFrom() error { + volumesFrom := container.hostConfig.VolumesFrom + + mountGroups := make([]map[string]*Mount, 0, len(volumesFrom)) + + for _, spec := range volumesFrom { + mountGroup, err := parseVolumesFromSpec(container.daemon, spec) + if err != nil { + return err + } + 
mountGroups = append(mountGroups, mountGroup) + } + + for _, mounts := range mountGroups { + for _, mnt := range mounts { + mnt.container = container + if err := mnt.initialize(); err != nil { + return err + } + } + } + return nil +} + +func validMountMode(mode string) bool { + validModes := map[string]bool{ + "rw": true, + "ro": true, + } + + return validModes[mode] +} + +func (container *Container) setupMounts() error { + mounts := []execdriver.Mount{ + {Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true}, + } + + if container.HostnamePath != "" { + mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true}) + } + + if container.HostsPath != "" { + mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) + } + + // Mount user specified volumes + // Note, these are not private because you may want propagation of (un)mounts from host + // volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you + // want this new mount in the container + // These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic) + for _, path := range container.sortedVolumeMounts() { + mounts = append(mounts, execdriver.Mount{ + Source: container.Volumes[path], + Destination: path, + Writable: container.VolumesRW[path], + }) + } + + container.command.Mounts = mounts + return nil +} + +func parseVolumesFromSpec(daemon *Daemon, spec string) (map[string]*Mount, error) { + specParts := strings.SplitN(spec, ":", 2) + if len(specParts) == 0 { + return nil, fmt.Errorf("Malformed volumes-from specification: %s", spec) + } + + c := daemon.Get(specParts[0]) + if c == nil { + return nil, fmt.Errorf("Container %s not found. 
Cannot mount its volumes", specParts[0]) + } + + mounts := c.VolumeMounts() + + if len(specParts) == 2 { + mode := specParts[1] + if !validMountMode(mode) { + return nil, fmt.Errorf("Invalid mode for volumes-from: %s", mode) + } + + // Set the mode for the inherited volume + for _, mnt := range mounts { + // Ensure that if the inherited volume is not writable, we don't make + // it writable here + mnt.Writable = mnt.Writable && (mode == "rw") + } + } + + return mounts, nil +} + +func (container *Container) VolumeMounts() map[string]*Mount { + mounts := make(map[string]*Mount) + + for mountToPath, path := range container.Volumes { + if v := container.daemon.volumes.Get(path); v != nil { + mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]} + } + } + + return mounts +} + +func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + + if len(srcList) == 0 { + // If the source volume is empty copy files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// into the destination file +func copyOwnership(source, destination string) error { + var stat syscall.Stat_t + + if err := syscall.Stat(source, &stat); err != nil { + return err + } + + if err := os.Chown(destination, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode)) +} diff --git a/daemon/wait.go b/daemon/wait.go new file mode 100644 index 00000000..a1f657c3 --- /dev/null +++ b/daemon/wait.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "time" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + status, _ := container.WaitStop(-1 * time.Second) + job.Printf("%d\n", status) + return engine.StatusOK + } + return job.Errorf("%s: No such container: %s", job.Name, name) +} diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..015bc133 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker's main function. + +This file provides first-line CLI argument parsing and environment variable setting. 
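+ +For illustration, a few hypothetical invocations that exercise this parsing (the host address is a placeholder; see docker/docker.go and docker/flags.go below for the authoritative flag definitions): + + $ docker --version + $ docker -d -D # daemon mode with debug; sets DEBUG=1 in the environment + $ DOCKER_HOST=tcp://127.0.0.1:2375 docker ps 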
diff --git a/docker/client.go b/docker/client.go new file mode 100644 index 00000000..27001cc5 --- /dev/null +++ b/docker/client.go @@ -0,0 +1,13 @@ +// +build !daemon + +package main + +import ( + "log" +) + +const CanDaemon = false + +func mainDaemon() { + log.Fatal("This is a client-only binary - running the Docker daemon is not supported.") +} diff --git a/docker/daemon.go b/docker/daemon.go new file mode 100644 index 00000000..8b5826f3 --- /dev/null +++ b/docker/daemon.go @@ -0,0 +1,93 @@ +// +build daemon + +package main + +import ( + "log" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builtins" + "github.com/docker/docker/daemon" + _ "github.com/docker/docker/daemon/execdriver/lxc" + _ "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/registry" +) + +const CanDaemon = true + +var ( + daemonCfg = &daemon.Config{} +) + +func init() { + daemonCfg.InstallFlags() +} + +func mainDaemon() { + if flag.NArg() != 0 { + flag.Usage() + return + } + eng := engine.New() + signal.Trap(eng.Shutdown) + + // Load builtins + if err := builtins.Register(eng); err != nil { + log.Fatal(err) + } + + // load registry service + if err := registry.NewService(daemonCfg.InsecureRegistries).Install(eng); err != nil { + log.Fatal(err) + } + + // load the daemon in the background so we can immediately start + // the http api so that connections don't fail while the daemon + // is booting + go func() { + d, err := daemon.NewDaemon(daemonCfg, eng) + if err != nil { + log.Fatal(err) + } + if err := d.Install(eng); err != nil { + log.Fatal(err) + } + + b := &builder.BuilderJob{eng, d} + b.Install() + + // after the daemon is done setting up we can tell the api to start + // accepting connections + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatal(err) + } + }() + // TODO actually have a resolved graphdriver to show? + log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", + dockerversion.VERSION, + dockerversion.GITCOMMIT, + daemonCfg.ExecDriver, + daemonCfg.GraphDriver, + ) + + // Serve api + job := eng.Job("serveapi", flHosts...) 
+ job.SetenvBool("Logging", true) + job.SetenvBool("EnableCors", *flEnableCors) + job.Setenv("Version", dockerversion.VERSION) + job.Setenv("SocketGroup", *flSocketGroup) + + job.SetenvBool("Tls", *flTls) + job.SetenvBool("TlsVerify", *flTlsVerify) + job.Setenv("TlsCa", *flCa) + job.Setenv("TlsCert", *flCert) + job.Setenv("TlsKey", *flKey) + job.SetenvBool("BufferRequests", true) + if err := job.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/docker/docker.go b/docker/docker.go new file mode 100644 index 00000000..12900ecb --- /dev/null +++ b/docker/docker.go @@ -0,0 +1,119 @@ +package main + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/client" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/utils" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" +) + +func main() { + if reexec.Init() { + return + } + flag.Parse() + // FIXME: validate daemon flags here + + if *flVersion { + showVersion() + return + } + if *flDebug { + os.Setenv("DEBUG", "1") + } + + if len(flHosts) == 0 { + defaultHost := os.Getenv("DOCKER_HOST") + if defaultHost == "" || *flDaemon { + // If we do not have a host, default to unix socket + defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) + } + defaultHost, err := api.ValidateHost(defaultHost) + if err != nil { + log.Fatal(err) + } + flHosts = append(flHosts, defaultHost) + } + + if *flDaemon { + mainDaemon() + return + } + + if len(flHosts) > 1 { + log.Fatal("Please specify only one -H") + } + protoAddrParts := strings.SplitN(flHosts[0], "://", 2) + + var ( + cli *client.DockerCli + tlsConfig tls.Config + ) + tlsConfig.InsecureSkipVerify = true + + // If we should verify the server, we need to load a trusted ca + if *flTlsVerify { + *flTls = true + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(*flCa) + if err != nil { + log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) + } + certPool.AppendCertsFromPEM(file) + tlsConfig.RootCAs = certPool + tlsConfig.InsecureSkipVerify = false + } + + // If tls is enabled, try to load and send client certificates + if *flTls || *flTlsVerify { + _, errCert := os.Stat(*flCert) + _, errKey := os.Stat(*flKey) + if errCert == nil && errKey == nil { + *flTls = true + cert, err := tls.LoadX509KeyPair(*flCert, *flKey) + if err != nil { + log.Fatalf("Couldn't load X509 key pair: %s. 
Key encrypted?", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + // Avoid fallback to SSL protocols < TLS1.0 + tlsConfig.MinVersion = tls.VersionTLS10 + } + + if *flTls || *flTlsVerify { + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, nil, protoAddrParts[0], protoAddrParts[1], &tlsConfig) + } else { + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, nil, protoAddrParts[0], protoAddrParts[1], nil) + } + + if err := cli.Cmd(flag.Args()...); err != nil { + if sterr, ok := err.(*utils.StatusError); ok { + if sterr.Status != "" { + log.Println(sterr.Status) + } + os.Exit(sterr.StatusCode) + } + log.Fatal(err) + } +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) +} diff --git a/docker/flags.go b/docker/flags.go new file mode 100644 index 00000000..61081ec9 --- /dev/null +++ b/docker/flags.go @@ -0,0 +1,101 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTlsVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = filepath.Join(os.Getenv("HOME"), ".docker") + } +} + +var ( + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") + flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") + flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") + flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") + flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") + flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") + + // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs + flTrustKey *string + flCa *string + flCert *string + flKey *string + flHosts []string +) + +func init() { + // placeholder for trust key flag + trustKeyDefault := filepath.Join(dockerCertPath, defaultTrustKeyFile) + flTrustKey = &trustKeyDefault + + flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here") + flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + opts.HostListVar(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") + + flag.Usage = func() { + fmt.Fprint(os.Stderr, "Usage: docker [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n") + + flag.PrintDefaults() + + help := "\nCommands:\n" + + for _, command := range [][]string{ + {"attach", "Attach to a running container"}, + {"build", "Build an image from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + 
{"cp", "Copy files/folders from a container's filesystem to the host path"}, + {"create", "Create a new container"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"exec", "Run a command in an existing container"}, + {"export", "Stream the contents of a container as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Create a new filesystem image from the contents of a tarball"}, + {"info", "Display system-wide information"}, + {"inspect", "Return low-level information on a container"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive"}, + {"login", "Register or log in to a Docker registry server"}, + {"logout", "Log out from a Docker registry server"}, + {"logs", "Fetch the logs of a container"}, + {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"}, + {"pause", "Pause all processes within a container"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from a Docker registry server"}, + {"push", "Push an image or a repository to a Docker registry server"}, + {"restart", "Restart a running container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save an image to a tar archive"}, + {"search", "Search for an image on the Docker Hub"}, + {"start", "Start a stopped container"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Lookup the running processes of a container"}, + {"unpause", "Unpause a paused container"}, + {"version", "Show the Docker version information"}, + {"wait", "Block until a container stops, then print its exit code"}, + } { + help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) + } + help += "\nRun 'docker COMMAND --help' for more information on a command." + fmt.Fprintf(os.Stderr, "%s\n", help) + } +} diff --git a/dockerinit/dockerinit.go b/dockerinit/dockerinit.go new file mode 100644 index 00000000..a6754b05 --- /dev/null +++ b/dockerinit/dockerinit.go @@ -0,0 +1,12 @@ +package main + +import ( + _ "github.com/docker/docker/daemon/execdriver/lxc" + _ "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/pkg/reexec" +) + +func main() { + // Running in init mode + reexec.Init() +} diff --git a/dockerversion/dockerversion.go b/dockerversion/dockerversion.go new file mode 100644 index 00000000..c130ac28 --- /dev/null +++ b/dockerversion/dockerversion.go @@ -0,0 +1,15 @@ +package dockerversion + +// FIXME: this should be embedded in the docker/docker.go, +// but we can't because distro policy requires us to +// package a separate dockerinit binary, and that binary needs +// to know its version too. 
+ +var ( + GITCOMMIT string + VERSION string + + IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary + INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary + INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch) +) diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 00000000..8da058a8 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,5 @@ +# generated by man/man/md2man-all.sh +man1/ +man5/ +# avoid committing the awsconfig file used for releases +awsconfig diff --git a/docs/Dockerfile b/docs/Dockerfile new file mode 100644 index 00000000..3c58193b --- /dev/null +++ b/docs/Dockerfile @@ -0,0 +1,49 @@ +# +# See the top level Makefile in https://github.com/docker/docker for usage. +# +FROM debian:jessie +MAINTAINER Sven Dowideit (@SvenDowideit) + +RUN apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext python-dev libssl-dev + +RUN pip install mkdocs + +# add MarkdownTools to get transclusion +# (future development) +#RUN easy_install -U setuptools +#RUN pip install MarkdownTools2 + +# this version works, the current versions fail in different ways +RUN pip install awscli==1.4.4 pyopenssl==0.12 + +# make sure the git clone is not an old cache - we've published old versions a few times now +ENV CACHE_BUST Jul2014 + +# get my sitemap.xml branch of mkdocs and use that for now +RUN git clone https://github.com/SvenDowideit/mkdocs &&\ + cd mkdocs/ &&\ + git checkout docker-markdown-merge &&\ + ./setup.py install + +ADD . /docs +ADD MAINTAINERS /docs/sources/humans.txt +WORKDIR /docs + +RUN VERSION=$(cat /docs/VERSION) &&\ + MAJOR_MINOR="${VERSION%.*}" &&\ + for i in $(seq $MAJOR_MINOR -0.1 1.0) ; do echo "
  • Version v$i
  • " ; done > /docs/sources/versions.html_fragment &&\ + GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\ + GITCOMMIT=$(cat /docs/GITCOMMIT) &&\ + AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\ + BUILD_DATE=$(date) &&\ + sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html + +# note, EXPOSE is only last because of https://github.com/docker/docker/issues/3525 +EXPOSE 8000 + +CMD ["mkdocs", "serve"] diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS new file mode 100644 index 00000000..d07b531d --- /dev/null +++ b/docs/MAINTAINERS @@ -0,0 +1,4 @@ +Fred Lifton (@fredlf) +James Turnbull (@jamtur01) +Sven Dowideit (@SvenDowideit) +O.S. Tezer (@OSTezer) diff --git a/docs/README.md b/docs/README.md new file mode 100755 index 00000000..27ed7eef --- /dev/null +++ b/docs/README.md @@ -0,0 +1,162 @@ +# Docker Documentation + +The source for Docker documentation is here under `sources/` and uses extended +Markdown, as implemented by [MkDocs](http://mkdocs.org). + +The HTML files are built and hosted on `https://docs.docker.com`, and update +automatically after each change to the master or release branch of [Docker on +GitHub](https://github.com/docker/docker) thanks to post-commit hooks. The +`docs` branch maps to the "latest" documentation and the `master` (unreleased +development) branch maps to the "master" documentation. + +## Contributing + +- Follow the contribution guidelines ([see + `../CONTRIBUTING.md`](../CONTRIBUTING.md)). +- [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) + +## Getting Started + +Docker documentation builds are done in a Docker container, which installs all +the required tools, adds the local `docs/` directory and builds the HTML docs. +It then starts a HTTP server on port 8000 so that you can connect and see your +changes. + +In the root of the `docker` source directory: + + $ make docs + .... (lots of output) .... + $ docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve + Running at: http://0.0.0.0:8000/ + Live reload enabled. + Hold ctrl+c to quit. + +If you have any issues you need to debug, you can use `make docs-shell` and then +run `mkdocs serve` + +## Adding a new document + +New document (`.md`) files are added to the documentation builds by adding them +to the menu definition in the `docs/mkdocs.yml` file. + +## Style guide + +The documentation is written with paragraphs wrapped at 80 column lines to make +it easier for terminal use. + +### Examples + +When writing examples, give the user hints by making them resemble what they see +in their shell: + +- Indent shell examples by 4 spaces so they get rendered as code. +- Start typed commands with `$ ` (dollar space), so that they are easily + differentiated from program output. +- Program output has no prefix. +- Comments begin with `# ` (hash space). +- In-container shell commands begin with `$$ ` (dollar dollar space). + +### Images + +When you need to add images, try to make them as small as possible (e.g., as +gifs). Usually images should go in the same directory as the `.md` file which +references them, or in a subdirectory if one already exists. 
+
+## Working using GitHub's file editor
+
+Alternatively, for small changes and typos you might want to use GitHub's
+built-in file editor. It allows you to preview your changes right online
+(though there can be some differences between GitHub Markdown and [MkDocs
+Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be
+careful not to create many commits. And you must still [sign your
+work!](../CONTRIBUTING.md#sign-your-work)
+
+## Branches
+
+**There are two branches related to editing docs**: `master` and a `docs`
+branch. You should always edit the documentation on a local branch of the
+`master` branch, and send a PR against `master`.
+
+That way your edits will automatically get included in later releases, and docs
+maintainers can easily cherry-pick your changes into the `docs` release branch.
+In the rare case where your change is not forward-compatible, you may need to
+base your changes on the `docs` branch.
+
+Also, now that we have a `docs` branch, we can keep the
+[http://docs.docker.com](http://docs.docker.com) docs up to date with any bugs
+found between Docker code releases.
+
+> **Warning**: When *reading* the docs, the
+> [http://docs-stage.docker.com](http://docs-stage.docker.com) documentation may
+> include features not yet part of any official Docker release. The `beta-docs`
+> site should be used only for understanding bleeding-edge development and
+> `docs.docker.com` (which points to the `docs` branch) should be used for the
+> latest official release.
+
+## Publishing Documentation
+
+To publish a copy of the documentation you need to have Docker up and running
+on your machine. You'll also need a `docs/awsconfig` file containing AWS
+settings to deploy to. The release script will create an S3 bucket if needed,
+and will then push the files to it.
+
+    [profile dowideit-docs]
+    aws_access_key_id = IHOIUAHSIDH234rwf....
+    aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
+    region = ap-southeast-2
+
+The `profile` name must be the same as the name of the bucket you are deploying
+to - which you call from the `docker` directory:
+
+    make AWS_S3_BUCKET=dowideit-docs docs-release
+
+This will publish _only_ to the `http://bucket-url/v1.2/` version of the
+documentation.
+
+If you're publishing the current release's documentation, you need to
+also update the root docs pages by running
+
+    make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
+
+> **Note:** if you are using Boot2Docker on OSX and the above command returns
+> the error `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
+> dial unix /var/run/docker.sock: no such file or directory`, you need to set
+> the Docker host. Run `$(boot2docker shellinit)` to see the correct variable
+> to set. The command will return the full `export` command, so you can just
+> cut and paste.
+
+## Cherry-picking documentation changes to update an existing release.
+
+Whenever the core team makes a release, they publish the documentation based
+on the `release` branch (which is copied into the `docs` branch). The
+documentation team can make updates in the meantime, by cherry-picking changes
+from `master` into any of the docs branches.
+
+For example, to update the current release's docs:
+
+    git fetch upstream
+    git checkout -b post-1.2.0-docs-update-1 upstream/docs
+    # Then go through the merge commits linked to PRs (making sure they apply
+    # to that release)
+    # see https://github.com/docker/docker/commits/master
+    git cherry-pick -x fe845c4
+    # Repeat until you have cherry-picked everything you will propose to be merged
+    git push upstream post-1.2.0-docs-update-1
+
+Then make a pull request to merge into the `docs` branch, __NOT__ into master.
+
+Once the PR has the needed `LGTM`s, merge it, then publish to our beta server
+to test:
+
+    git fetch upstream
+    git checkout post-1.2.0-docs-update-1
+    git reset --hard upstream/post-1.2.0-docs-update-1
+    make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+
+Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/
+to view your results and make sure what you published is what you wanted.
+
+When you're happy with it, publish the docs to our live site:
+
+    make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release
+
+Note that the new docs will not appear live on the site until the cache (a
+complex, distributed CDN system) is flushed. This requires someone with S3
+keys. Contact Docker (Sven Dowideit or John Costa) for assistance.
+
diff --git a/docs/docs-update.py b/docs/docs-update.py
new file mode 100755
index 00000000..b605aecc
--- /dev/null
+++ b/docs/docs-update.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+
+#
+# Sven's quick hack script to update the documentation
+#
+# call with:
+# ./docs/update.py /usr/bin/docker
+#
+
+import datetime
+import re
+from sys import argv
+import subprocess
+import os
+import os.path
+
+script, docker_cmd = argv
+
+date_string = datetime.date.today().strftime('%B %Y')
+
+
+def print_usage(outtext, docker_cmd, command):
+    try:
+        help_string = subprocess.check_output(
+            "".join((docker_cmd, " ", command, " --help")),
+            stderr=subprocess.STDOUT,
+            shell=True
+        )
+    except subprocess.CalledProcessError, e:
+        help_string = e.output
+    for l in str(help_string).strip().split("\n"):
+        l = l.rstrip()
+        if l == '':
+            outtext.write("\n")
+        else:
+            # `docker --help` tells the user the path they called it with
+            l = re.sub(docker_cmd, "docker", l)
+            outtext.write("    {}\n".format(l))
+    outtext.write("\n")
+
+
+# TODO: look for and complain about any missing commands
+def update_cli_reference():
+    originalFile = "docs/sources/reference/commandline/cli.md"
+    os.rename(originalFile, originalFile+".bak")
+
+    intext = open("{}.bak".format(originalFile), "r")
+    outtext = open(originalFile, "w")
+
+    mode = 'p'
+    space = "    "
+    command = ""
+    # two-mode, line-by-line parser
+    for line in intext:
+        if mode == 'p':
+            # Prose
+            match = re.match("(    \s*)Usage: docker ([a-z]+)", line)
+            if match:
+                # the beginning of a Docker command usage block
+                space = match.group(1)
+                command = match.group(2)
+                mode = 'c'
+            else:
+                match = re.match("(    \s*)Usage of .*docker.*:", line)
+                if match:
+                    # the beginning of the Docker --help usage block
+                    space = match.group(1)
+                    command = ""
+                    mode = 'c'
+                else:
+                    outtext.write(line)
+        else:
+            # command usage block
+            match = re.match("("+space+")(.*)|^$", line)
+            if not match:
+                # The end of the current usage block
+                # Shell out to run docker to see the new output
+                print_usage(outtext, docker_cmd, command)
+                outtext.write(line)
+                mode = 'p'
+    if mode == 'c':
+        print_usage(outtext, docker_cmd, command)
+
+
+def update_man_pages():
+    cmds = []
+    try:
+        help_string = subprocess.check_output(
+            docker_cmd,
+            stderr=subprocess.STDOUT,
+            shell=True
+        )
+    except subprocess.CalledProcessError, e:
+        help_string = e.output
+    for l in str(help_string).strip().split("\n"):
+        l = l.rstrip()
+        if l != "":
+            match = re.match("    (.*?) .*", l)
+            if match:
+                cmds.append(match.group(1))
+
+    desc_re = re.compile(
+        r".*# DESCRIPTION(.*?)# (OPTIONS|EXAMPLES?).*",
+        re.MULTILINE | re.DOTALL
+    )
+
+    example_re = re.compile(
+        r".*# EXAMPLES?(.*)# HISTORY.*",
+        re.MULTILINE | re.DOTALL
+    )
+
+    history_re = re.compile(
+        r".*# HISTORY(.*)",
+        re.MULTILINE | re.DOTALL
+    )
+
+    for command in cmds:
+        print "COMMAND: "+command
+        history = ""
+        description = ""
+        examples = ""
+        if os.path.isfile("docs/man/docker-"+command+".1.md"):
+            intext = open("docs/man/docker-"+command+".1.md", "r")
+            txt = intext.read()
+            intext.close()
+            match = desc_re.match(txt)
+            if match:
+                description = match.group(1)
+            match = example_re.match(txt)
+            if match:
+                examples = match.group(1)
+            match = history_re.match(txt)
+            if match:
+                history = match.group(1).strip()
+
+        usage = ""
+        usage_description = ""
+        params = {}
+        key_params = {}
+
+        try:
+            help_string = subprocess.check_output(
+                "".join((docker_cmd, " ", command, " --help")),
+                stderr=subprocess.STDOUT,
+                shell=True
+            )
+        except subprocess.CalledProcessError, e:
+            help_string = e.output
+
+        last_key = ""
+        for l in str(help_string).split("\n"):
+            l = l.rstrip()
+            if l != "":
+                match = re.match("Usage: docker {}(.*)".format(command), l)
+                if match:
+                    usage = match.group(1).strip()
+                else:
+                    match = re.match("  (-+)(.*) \s+(.*)", l)
+                    if match:
+                        last_key = match.group(2).rstrip()
+                        key_params[last_key] = match.group(1)+last_key
+                        params[last_key] = match.group(3)
+                    else:
+                        if last_key != "":
+                            params[last_key] = "{}\n{}".format(params[last_key], l)
+                        else:
+                            if usage_description != "":
+                                usage_description = usage_description + "\n"
+                            usage_description = usage_description + l
+
+        # replace [OPTIONS] with the list of params
+        options = ""
+        match = re.match("\[OPTIONS\](.*)", usage)
+        if match:
+            usage = match.group(1)
+
+        new_usage = ""
+        # TODO: sort without the `-`'s
+        for key in sorted(params.keys(), key=lambda s: s.lower()):
+            # split on commas, remove --?.*=.*, put in *'s mumble
+            ps = []
+            opts = []
+            for k in key_params[key].split(","):
+                match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip())
+                if match:
+                    p = "**{}{}**".format(match.group(1), match.group(2))
+                    o = "**{}{}**".format(match.group(1), match.group(2))
+                    if match.group(3):
+                        val = match.group(3)
+                        if val == "\"\"":
+                            val = match.group(2).upper()
+                        p = "{}[=*{}*]".format(p, val)
+                        val = match.group(3)
+                        # boolean flags get their default noted in the man text
+                        if val in ("true", "false"):
+                            params[key] = params[key].rstrip()
+                            if not params[key].endswith('.'):
+                                params[key] = params[key]+"."
+ params[key] = "{} The default is *{}*.".format(params[key], val) + val = "*true*|*false*" + o = "{}={}".format(o, val) + ps.append(p) + opts.append(o) + else: + print "nomatch:{}".format(k) + new_usage = "{}\n[{}]".format(new_usage, "|".join(ps)) + options = "{}{}\n {}\n\n".format(options, ", ".join(opts), params[key]) + if new_usage != "": + new_usage = "{}\n".format(new_usage.strip()) + usage = new_usage + usage + + outtext = open("docs/man/docker-{}.1.md".format(command), "w") + outtext.write("""% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +""") + outtext.write("docker-{} - {}\n\n".format(command, usage_description)) + outtext.write("# SYNOPSIS\n**docker {}**\n{}\n\n".format(command, usage)) + if description != "": + outtext.write("# DESCRIPTION{}".format(description)) + if options == "": + options = "There are no available options.\n\n" + outtext.write("# OPTIONS\n{}".format(options)) + if examples != "": + outtext.write("# EXAMPLES{}".format(examples)) + outtext.write("# HISTORY\n") + if history != "": + outtext.write("{}\n".format(history)) + recent_history_re = re.compile( + ".*{}.*".format(date_string), + re.MULTILINE | re.DOTALL + ) + if not recent_history_re.match(history): + outtext.write("{}, updated by Sven Dowideit \n".format(date_string)) + outtext.close() + +# main +update_cli_reference() +update_man_pages() diff --git a/docs/man/Dockerfile b/docs/man/Dockerfile new file mode 100644 index 00000000..9910bd48 --- /dev/null +++ b/docs/man/Dockerfile @@ -0,0 +1,7 @@ +FROM golang:1.3 +RUN mkdir -p /go/src/github.com/cpuguy83 +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... +CMD ["/go/bin/go-md2man", "--help"] diff --git a/docs/man/Dockerfile.5.md b/docs/man/Dockerfile.5.md new file mode 100644 index 00000000..4104dc23 --- /dev/null +++ b/docs/man/Dockerfile.5.md @@ -0,0 +1,207 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. The +**Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the **docker build** command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + +FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + +**sudo docker build .** + -- runs the steps and commits them, building a final image + The path to the source repository defines where to find the context of the + build. The build is run by the docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + "Sending build context to Docker daemon" when the context is sent to the daemon. + +**sudo docker build -t repository/tag .** + -- specifies a repository and tag at which to save the new image if the build + succeeds. 
The Docker daemon runs the steps one-by-one, committing the result
+to a new image if necessary, before finally outputting the ID of the new
+image. The Docker daemon automatically cleans up the context it is given.
+
+Docker re-uses intermediate images whenever possible. This significantly
+accelerates the *docker build* process.
+
+# FORMAT
+
+**FROM image**
+or
+**FROM image:tag**
+  -- The FROM instruction sets the base image for subsequent instructions. A
+  valid Dockerfile must have FROM as its first instruction. The image can be any
+  valid image. It is easy to start by pulling an image from the public
+  repositories.
+  -- FROM must be the first non-comment instruction in Dockerfile.
+  -- FROM may appear multiple times within a single Dockerfile in order to create
+  multiple images. Make a note of the last image id output by the commit before
+  each new FROM command.
+  -- If no tag is given to the FROM instruction, latest is assumed. If the used
+  tag does not exist, an error is returned.
+
+**MAINTAINER**
+  --The MAINTAINER instruction sets the Author field for the generated images.
+
+**RUN**
+  --RUN has two forms:
+  **RUN <command>**
+  -- (the command is run in a shell - /bin/sh -c)
+  **RUN ["executable", "param1", "param2"]**
+  --The above is executable form.
+  --The RUN instruction executes any commands in a new layer on top of the
+  current image and commits the results. The committed image is used for the next
+  step in the Dockerfile.
+  --Layering RUN instructions and generating commits conforms to the core
+  concepts of Docker where commits are cheap and containers can be created from
+  any point in the history of an image. This is similar to source control. The
+  exec form makes it possible to avoid shell string munging. The exec form makes
+  it possible to RUN commands using a base image that does not contain /bin/sh.
+
+**CMD**
+  --CMD has three forms:
+  **CMD ["executable", "param1", "param2"]** This is the preferred form, the
+  exec form.
+  **CMD ["param1", "param2"]** This command provides default parameters to
+  ENTRYPOINT.
+  **CMD command param1 param2** This command is run as a shell.
+  --There can be only one CMD in a Dockerfile. If more than one CMD is listed, only
+  the last CMD takes effect.
+  The main purpose of a CMD is to provide defaults for an executing container.
+  These defaults may include an executable, or they can omit the executable. If
+  they omit the executable, an ENTRYPOINT must be specified.
+  When used in the shell or exec formats, the CMD instruction sets the command to
+  be executed when running the image.
+  If you use the shell form of the CMD, the <command> executes in /bin/sh -c:
+  **FROM ubuntu**
+  **CMD echo "This is a test." | wc -**
+  If you run <command> without a shell, then you must express the command as a
+  JSON array and give the full path to the executable. This array form is the
+  preferred form of CMD. All additional parameters must be individually expressed
+  as strings in the array:
+  **FROM ubuntu**
+  **CMD ["/usr/bin/wc","--help"]**
+  To make the container run the same executable every time, use ENTRYPOINT in
+  combination with CMD.
+  If the user specifies arguments to docker run, the specified commands override
+  the default in CMD.
+  Do not confuse **RUN** with **CMD**. RUN runs a command and commits the result.
+  CMD executes nothing at build time, but specifies the intended command for the
+  image.
+
+**EXPOSE**
+  --**EXPOSE <port> [<port>...]**
+  The **EXPOSE** instruction informs Docker that the container listens on the
+  specified network ports at runtime. Docker uses this information to
+  interconnect containers using links, and to set up port redirection on the host
+  system.
+
+**ENV**
+  --**ENV <key> <value>**
+  The ENV instruction sets the environment variable <key> to
+  the value <value>. This value is passed to all future RUN instructions. This is
+  functionally equivalent to prefixing the command with **<key>=<value>**. The
+  environment variables that are set with ENV persist when a container is run
+  from the resulting image. Use docker inspect to inspect these values, and
+  change them using docker run **--env <key>=<value>**.
+
+  Note that setting **ENV DEBIAN_FRONTEND noninteractive** may cause
+  unintended consequences, because it will persist when the container is run
+  interactively, as with the following command: **docker run -t -i image bash**
+
+**ADD**
+  --**ADD <src>... <dest>** The ADD instruction copies new files, directories
+  or remote file URLs to the filesystem of the container at path <dest>.
+  Multiple <src> resources may be specified but if they are files or directories
+  then they must be relative to the source directory that is being built
+  (the context of the build). <dest> is the absolute path to
+  which the source is copied inside the target container. All new files and
+  directories are created with mode 0755, with uid and gid 0.
+
+**ENTRYPOINT**
+  --**ENTRYPOINT** has two forms: ENTRYPOINT ["executable", "param1", "param2"]
+  (This is like an exec, and is the preferred form.) ENTRYPOINT command param1
+  param2 (This is running as a shell.) An ENTRYPOINT helps you configure a
+  container that can be run as an executable. When you specify an ENTRYPOINT,
+  the whole container runs as if it was only that executable. The ENTRYPOINT
+  instruction adds an entry command that is not overwritten when arguments are
+  passed to docker run. This is different from the behavior of CMD. This allows
+  arguments to be passed to the entrypoint, for instance docker run <image> -d
+  passes the -d argument to the ENTRYPOINT. Specify parameters either in the
+  ENTRYPOINT JSON array (as in the preferred exec form above), or by using a CMD
+  statement. Parameters in the ENTRYPOINT are not overwritten by the docker run
+  arguments. Parameters specified via CMD are overwritten by docker run
+  arguments. Specify a plain string for the ENTRYPOINT, and it will execute in
+  /bin/sh -c, like a CMD instruction:
+  FROM ubuntu
+  ENTRYPOINT wc -l -
+  This means that the Dockerfile's image always takes stdin as input (that's
+  what "-" means), and prints the number of lines (that's what "-l" means). To
+  make this optional but default, use a CMD:
+  FROM ubuntu
+  CMD ["-l", "-"]
+  ENTRYPOINT ["/usr/bin/wc"]
+
+**VOLUME**
+  --**VOLUME ["/data"]**
+  The VOLUME instruction creates a mount point with the specified name and marks
+  it as holding externally-mounted volumes from the native host or from other
+  containers.
+
+**USER**
+  -- **USER daemon**
+  The USER instruction sets the username or UID that is used when running the
+  image.
+
+**WORKDIR**
+  -- **WORKDIR /path/to/workdir**
+  The WORKDIR instruction sets the working directory for the **RUN**, **CMD**,
+  and **ENTRYPOINT** Dockerfile commands that follow it. It can be used multiple
+  times in a single Dockerfile. Relative paths are defined relative to the path
+  of the previous **WORKDIR** instruction. For example:
+  **WORKDIR /a WORKDIR b WORKDIR c RUN pwd**
+  In the above example, the output of the **pwd** command is **/a/b/c** (a
+  short sketch of this rule follows).
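+
+  A minimal Go sketch of that resolution rule (an illustration only, not the
+  builder's actual code; the real handling lives in the builder package):
+
+      package main
+
+      import (
+          "fmt"
+          "path/filepath"
+      )
+
+      func main() {
+          wd := "/"
+          for _, w := range []string{"/a", "b", "c"} { // WORKDIR /a; WORKDIR b; WORKDIR c
+              if filepath.IsAbs(w) {
+                  wd = w // an absolute WORKDIR replaces the path
+              } else {
+                  wd = filepath.Join(wd, w) // a relative WORKDIR appends to it
+              }
+          }
+          fmt.Println(wd) // prints /a/b/c
+      }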
+ +**ONBUILD** + -- **ONBUILD [INSTRUCTION]** + The ONBUILD instruction adds a trigger instruction to the image, which is + executed at a later time, when the image is used as the base for another + build. The trigger is executed in the context of the downstream build, as + if it had been inserted immediately after the FROM instruction in the + downstream Dockerfile. Any build instruction can be registered as a + trigger. This is useful if you are building an image to be + used as a base for building other images, for example an application build + environment or a daemon to be customized with a user-specific + configuration. For example, if your image is a reusable python + application builder, it requires application source code to be + added in a particular directory, and might require a build script + to be called after that. You can't just call ADD and RUN now, because + you don't yet have access to the application source code, and it + is different for each application build. Providing + application developers with a boilerplate Dockerfile to copy-paste + into their application is inefficient, error-prone, and + difficult to update because it mixes with application-specific code. + The solution is to use **ONBUILD** to register instructions in advance, to + run later, during the next build stage. + +# HISTORY +*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. diff --git a/docs/man/README.md b/docs/man/README.md new file mode 100644 index 00000000..a52e0cbe --- /dev/null +++ b/docs/man/README.md @@ -0,0 +1,70 @@ +Docker Documentation +==================== + +This directory contains the Docker user manual in the Markdown format. +Do *not* edit the man pages in the man1 directory. Instead, amend the +Markdown (*.md) files. + +# File List + + docker.md + docker-attach.md + docker-build.md + docker-commit.md + docker-cp.md + docker-diff.md + docker-events.md + docker-export.md + docker-history.md + docker-images.md + docker-import.md + docker-info.md + docker-inspect.md + docker-kill.md + docker-load.md + docker-login.md + docker-logs.md + docker-port.md + docker-ps.md + docker-pull.md + docker-push.md + docker-restart.md + docker-rmi.md + docker-rm.md + docker-run.md + docker-save.md + docker-search.md + docker-start.md + docker-stop.md + docker-tag.md + docker-top.md + docker-wait.md + Dockerfile + md2man-all.sh + +# Generating man pages from the Markdown files + +The recommended approach for generating the man pages is via a Docker +container using the supplied `Dockerfile` to create an image with the correct +environment. This uses `go-md2man`, a pure Go Markdown to man page generator. + +## Building the md2man image + +There is a `Dockerfile` provided in the `docker/docs/man` directory. + +Using this `Dockerfile`, create a Docker image tagged `docker/md2man`: + + docker build -t docker/md2man . + +## Utilizing the image + +Once the image is built, run a container using the image with *volumes*: + + docker run -v //docker/docs/man:/docs:rw \ + -w /docs -i docker/md2man /docs/md2man-all.sh + +The `md2man` Docker container will process the Markdown files and generate +the man pages inside the `docker/docs/man/man1` directory using +Docker volumes. For more information on Docker volumes see the man page for +`docker run` and also look at the article [Sharing Directories via Volumes] +(http://docs.docker.com/use/working_with_volumes/). 
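+
+## Using go-md2man as a library
+
+`go-md2man` can also be driven from Go directly; the following is a minimal
+sketch of the conversion step (assuming the v1 import path cloned by the
+`Dockerfile` above):
+
+    package main
+
+    import (
+        "fmt"
+        "io/ioutil"
+        "os"
+
+        "github.com/cpuguy83/go-md2man/md2man"
+    )
+
+    func main() {
+        // Usage: md2roff docker-attach.1.md > man1/docker-attach.1
+        in, err := ioutil.ReadFile(os.Args[1])
+        if err != nil {
+            fmt.Fprintln(os.Stderr, err)
+            os.Exit(1)
+        }
+        os.Stdout.Write(md2man.Render(in))
+    }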
diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md new file mode 100644 index 00000000..7deda6c7 --- /dev/null +++ b/docs/man/docker-attach.1.md @@ -0,0 +1,61 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-attach - Attach to a running container + +# SYNOPSIS +**docker attach** +[**--no-stdin**[=*false*]] +[**--sig-proxy**[=*true*]] + CONTAINER + +# DESCRIPTION +If you **docker run** a container in detached mode (**-d**), you can reattach to +the detached container with **docker attach** using the container's ID or name. + +You can detach from the container again (and leave it running) with `CTRL-p +CTRL-q` (for a quiet exit), or `CTRL-c` which will send a SIGKILL to the +container, or `CTRL-\` to get a stacktrace of the Docker client when it quits. +When you detach from a container the exit code will be returned to +the client. + +# OPTIONS +**--no-stdin**=*true*|*false* + Do not attach STDIN. The default is *false*. + +**--sig-proxy**=*true*|*false* + Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + +# EXAMPLES + +## Attaching to a container + +In this example the top command is run inside a container, from an image called +fedora, in detached mode. The ID from the container is passed into the **docker +attach** command: + + # ID=$(sudo docker run -d fedora /usr/bin/top -b) + # sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md new file mode 100644 index 00000000..c562660b --- /dev/null +++ b/docs/man/docker-build.1.md @@ -0,0 +1,121 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-build - Build a new image from the source code at PATH + +# SYNOPSIS +**docker build** +[**--force-rm**[=*false*]] +[**--no-cache**[=*false*]] +[**-q**|**--quiet**[=*false*]] +[**--rm**[=*true*]] +[**-t**|**--tag**[=*TAG*]] + PATH | URL | - + +# DESCRIPTION +This will read the Dockerfile from the directory specified in **PATH**. +It also sends any other files and directories found in the current +directory to the Docker daemon. The contents of this directory would +be used by **ADD** commands found within the Dockerfile. + +Warning, this will send a lot of data to the Docker daemon depending +on the contents of the current directory. The build is run by the Docker +daemon, not by the CLI, so the whole context must be transferred to the daemon. 
+The Docker CLI reports "Sending build context to Docker daemon" when the
+context is sent to the daemon.
+
+When a single Dockerfile is given as the URL, then no context is set.
+When a Git repository is set as the **URL**, the repository is used
+as context.
+
+# OPTIONS
+**--force-rm**=*true*|*false*
+  Always remove intermediate containers, even after unsuccessful builds. The default is *false*.
+
+**--no-cache**=*true*|*false*
+  Do not use cache when building the image. The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+  Suppress the verbose output generated by the containers. The default is *false*.
+
+**--rm**=*true*|*false*
+  Remove intermediate containers after a successful build. The default is *true*.
+
+**-t**, **--tag**=""
+  Repository name (and optionally a tag) to be applied to the resulting image in case of success
+
+# EXAMPLES
+
+## Building an image using a Dockerfile located inside the current directory
+
+Docker images can be built using the build command and a Dockerfile:
+
+    docker build .
+
+During the build process Docker creates intermediate images. In order to
+keep them, you must explicitly set `--rm=false`.
+
+    docker build --rm=false .
+
+A good practice is to make a sub-directory with a related name and create
+the Dockerfile in that directory. For example, a directory called mongo may
+contain a Dockerfile to create a Docker MongoDB image. Likewise, another
+directory called httpd may be used to store Dockerfiles for Apache web
+server images.
+
+It is also a good practice to add the files required for the image to the
+sub-directory. These files will then be specified with the `ADD` instruction
+in the Dockerfile. Note: If you include a tar file (a good practice!), then
+Docker will automatically extract the contents of the tar file
+specified within the `ADD` instruction into the specified target.
+
+## Building an image and naming that image
+
+A good practice is to give a name to the image you are building. There are
+no hard rules here but it is best to give the names consideration.
+
+The **-t**/**--tag** flag is used to rename an image. Here are some examples:
+
+Though it is not a good practice, image names can be arbitrary:
+
+    docker build -t myimage .
+
+A better approach is to provide a fully qualified and meaningful repository
+name and tag (where the tag in this context means the qualifier after
+the ":"). In this example we build a JBoss image for the Fedora repository
+and give it the version 1.0:
+
+    docker build -t fedora/jboss:1.0 .
+
+The next example is for the "whenry" user repository and uses Fedora and
+JBoss and gives it the version 2.1:
+
+    docker build -t whenry/fedora-jboss:V2.1 .
+
+If you do not provide a version tag then Docker will assign `latest`:
+
+    docker build -t whenry/fedora-jboss .
+
+When you list the images, the image above will have the tag `latest`.
+
+So renaming an image is arbitrary but consideration should be given to
+a useful convention that makes sense for consumers and should also take
+into account Docker community conventions.
+
+
+## Building an image using a URL
+
+This will clone the specified GitHub repository from the URL and use it
+as context. The Dockerfile at the root of the repository is used as
+Dockerfile. This only works if the GitHub repository is a dedicated
+repository.
+
+    docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
+
+Note: You can set an arbitrary Git repository via the `git://` scheme.
+ +# HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-commit.1.md b/docs/man/docker-commit.1.md new file mode 100644 index 00000000..31edcc03 --- /dev/null +++ b/docs/man/docker-commit.1.md @@ -0,0 +1,41 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-commit - Create a new image from a container's changes + +# SYNOPSIS +**docker commit** +[**-a**|**--author**[=*AUTHOR*]] +[**-m**|**--message**[=*MESSAGE*]] +[**-p**|**--pause**[=*true*]] + CONTAINER [REPOSITORY[:TAG]] + +# DESCRIPTION +Using an existing container's name or ID you can create a new image. + +# OPTIONS +**-a**, **--author**="" + Author (e.g., "John Hannibal Smith ") + +**-m**, **--message**="" + Commit message + +**-p**, **--pause**=*true*|*false* + Pause container during commit. The default is *true*. + +# EXAMPLES + +## Creating a new image from an existing container +An existing Fedora based container has had Apache installed while running +in interactive mode with the bash shell. Apache is also running. To +create a new image run docker ps to find the container's ID and then run: + + # docker commit -m="Added Apache to Fedora base image" \ + -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and in +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/docs/man/docker-cp.1.md b/docs/man/docker-cp.1.md new file mode 100644 index 00000000..dc8f295b --- /dev/null +++ b/docs/man/docker-cp.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-cp - Copy files/folders from the PATH to the HOSTPATH + +# SYNOPSIS +**docker cp** +CONTAINER:PATH HOSTPATH + +# DESCRIPTION +Copy files/folders from a container's filesystem to the host +path. Paths are relative to the root of the filesystem. Files +can be copied from a running or stopped container. + +# OPTIONS +There are no available options. + +# EXAMPLES +An important shell script file, created in a bash shell, is copied from +the exited container to the current dir on the host: + + # docker cp c071f3c3ee81:setup.sh . + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit
diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md
new file mode 100644
index 00000000..c5ed0349
--- /dev/null
+++ b/docs/man/docker-create.1.md
@@ -0,0 +1,140 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-create - Create a new container
+
+# SYNOPSIS
+**docker create**
+[**-a**|**--attach**[=*[]*]]
+[**--add-host**[=*[]*]]
+[**-c**|**--cpu-shares**[=*0*]]
+[**--cap-add**[=*[]*]]
+[**--cap-drop**[=*[]*]]
+[**--cidfile**[=*CIDFILE*]]
+[**--cpuset**[=*CPUSET*]]
+[**--device**[=*[]*]]
+[**--dns-search**[=*[]*]]
+[**--dns**[=*[]*]]
+[**-e**|**--env**[=*[]*]]
+[**--entrypoint**[=*ENTRYPOINT*]]
+[**--env-file**[=*[]*]]
+[**--expose**[=*[]*]]
+[**-h**|**--hostname**[=*HOSTNAME*]]
+[**-i**|**--interactive**[=*false*]]
+[**--link**[=*[]*]]
+[**--lxc-conf**[=*[]*]]
+[**-m**|**--memory**[=*MEMORY*]]
+[**--name**[=*NAME*]]
+[**--net**[=*"bridge"*]]
+[**-P**|**--publish-all**[=*false*]]
+[**-p**|**--publish**[=*[]*]]
+[**--privileged**[=*false*]]
+[**--restart**[=*RESTART*]]
+[**-t**|**--tty**[=*false*]]
+[**-u**|**--user**[=*USER*]]
+[**-v**|**--volume**[=*[]*]]
+[**--volumes-from**[=*[]*]]
+[**-w**|**--workdir**[=*WORKDIR*]]
+ IMAGE [COMMAND] [ARG...]
+
+# OPTIONS
+**-a**, **--attach**=[]
+  Attach to STDIN, STDOUT or STDERR.
+
+**--add-host**=[]
+  Add a custom host-to-IP mapping (host:ip)
+
+**-c**, **--cpu-shares**=0
+  CPU shares (relative weight)
+
+**--cap-add**=[]
+  Add Linux capabilities
+
+**--cap-drop**=[]
+  Drop Linux capabilities
+
+**--cidfile**=""
+  Write the container ID to the file
+
+**--cpuset**=""
+  CPUs in which to allow execution (0-3, 0,1)
+
+**--device**=[]
+  Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)
+
+**--dns-search**=[]
+  Set custom DNS search domains
+
+**--dns**=[]
+  Set custom DNS servers
+
+**-e**, **--env**=[]
+  Set environment variables
+
+**--entrypoint**=""
+  Overwrite the default ENTRYPOINT of the image
+
+**--env-file**=[]
+  Read in a line-delimited file of environment variables
+
+**--expose**=[]
+  Expose a port from the container without publishing it to your host
+
+**-h**, **--hostname**=""
+  Container host name
+
+**-i**, **--interactive**=*true*|*false*
+  Keep STDIN open even if not attached. The default is *false*.
+
+**--link**=[]
+  Add link to another container in the form of name:alias
+
+**--lxc-conf**=[]
+  (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+
+**-m**, **--memory**=""
+  Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
+
+**--name**=""
+  Assign a name to the container
+
+**--net**="bridge"
+  Set the Network mode for the container
+  'bridge': creates a new network stack for the container on the docker bridge
+  'none': no networking for this container
+  'container:<name|id>': reuses another container's network stack
+  'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+
+**-P**, **--publish-all**=*true*|*false*
+  Publish all exposed ports to the host interfaces. The default is *false*.
+
+**-p**, **--publish**=[]
+  Publish a container's port to the host
+  format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort
+  (use 'docker port' to see the actual mapping)
+
+**--privileged**=*true*|*false*
+  Give extended privileges to this container. The default is *false*.
+ +**--restart**="" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always) + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**-u**, **--user**="" + Username or UID + +**-v**, **--volume**=[] + Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) + +**--volumes-from**=[] + Mount volumes from the specified container(s) + +**-w**, **--workdir**="" + Working directory inside the container + +# HISTORY +August 2014, updated by Sven Dowideit +September 2014, updated by Sven Dowideit diff --git a/docs/man/docker-diff.1.md b/docs/man/docker-diff.1.md new file mode 100644 index 00000000..acf0911b --- /dev/null +++ b/docs/man/docker-diff.1.md @@ -0,0 +1,47 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-diff - Inspect changes on a container's filesystem + +# SYNOPSIS +**docker diff** +CONTAINER + +# DESCRIPTION +Inspect changes on a container's filesystem. You can use the full or +shortened container ID or the container name set using +**docker run --name** option. + +# OPTIONS +There are no available options. + +# EXAMPLES +Inspect the changes to on a nginx container: + + # docker diff 1fdfd1f54c1b + C /dev + C /dev/console + C /dev/core + C /dev/stdout + C /dev/fd + C /dev/ptmx + C /dev/stderr + C /dev/stdin + C /run + A /run/nginx.pid + C /var/lib/nginx/tmp + A /var/lib/nginx/tmp/client_body + A /var/lib/nginx/tmp/fastcgi + A /var/lib/nginx/tmp/proxy + A /var/lib/nginx/tmp/scgi + A /var/lib/nginx/tmp/uwsgi + C /var/log/nginx + A /var/log/nginx/access.log + A /var/log/nginx/error.log + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-events.1.md b/docs/man/docker-events.1.md new file mode 100644 index 00000000..c8884397 --- /dev/null +++ b/docs/man/docker-events.1.md @@ -0,0 +1,61 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-events - Get real time events from the server + +# SYNOPSIS +**docker events** +[**--since**[=*SINCE*]] +[**--until**[=*UNTIL*]] + + +# DESCRIPTION +Get event information from the Docker daemon. Information can include historical +information and real-time information. 
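+
+Under the hood this is the remote API's `GET /events` endpoint; the following
+is a minimal Go sketch of consuming it directly (assuming the default unix
+socket path; the CLI's own transport plumbing differs):
+
+    package main
+
+    import (
+        "io"
+        "net"
+        "net/http"
+        "os"
+    )
+
+    func main() {
+        // Dial the daemon's unix socket no matter what host the URL names.
+        tr := &http.Transport{
+            Dial: func(proto, addr string) (net.Conn, error) {
+                return net.Dial("unix", "/var/run/docker.sock")
+            },
+        }
+        client := &http.Client{Transport: tr}
+        resp, err := client.Get("http://docker/events") // host is a placeholder
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+        io.Copy(os.Stdout, resp.Body) // stream JSON events as they arrive
+    }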
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +# OPTIONS +**--since**="" + Show all events created since timestamp + +**--until**="" + Stream events until this timestamp + +# EXAMPLES + +## Listening for Docker events + +After running docker events a container 786d698004576 is started and stopped +(The container name has been shortened in the output below): + + # docker events + [2014-04-12 18:23:04 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start + [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) die + [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) stop + +## Listening for events since a given date +Again the output container IDs have been shortened for the purposes of this document: + + # docker events --since '2014-04-12' + [2014-04-12 18:11:28 -0400 EDT] c655dbf640dc: (from whenry/testimage:latest) create + [2014-04-12 18:11:28 -0400 EDT] c655dbf640dc: (from whenry/testimage:latest) start + [2014-04-12 18:14:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) create + [2014-04-12 18:14:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start + [2014-04-12 18:22:44 -0400 EDT] 786d69800457: (from whenry/testimage:latest) die + [2014-04-12 18:22:44 -0400 EDT] 786d69800457: (from whenry/testimage:latest) stop + [2014-04-12 18:23:04 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start + [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) die + [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) stop + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md new file mode 100644 index 00000000..d5ec1265 --- /dev/null +++ b/docs/man/docker-exec.1.md @@ -0,0 +1,29 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% SEPT 2014 +# NAME +docker-exec - Run a command in a running container + +# SYNOPSIS +**docker exec** +[**-d**|**--detach**[=*false*]] +[**-i**|**--interactive**[=*false*]] +[**-t**|**--tty**[=*false*]] + CONTAINER COMMAND [ARG...] + +# DESCRIPTION + +Run a process in a running container. + +# Options + +**-d**, **--detach**=*true*|*false* + Detached mode. This runs the new process in the background. + +**-i**, **--interactive**=*true*|*false* + When set to true, keep STDIN open even if not attached. The default is false. + +**-t**, **--tty**=*true*|*false* + When set to true Docker can allocate a pseudo-tty and attach to the standard +input of the process. This can be used, for example, to run a throwaway +interactive shell. The default value is false. diff --git a/docs/man/docker-export.1.md b/docs/man/docker-export.1.md new file mode 100644 index 00000000..8fd7834a --- /dev/null +++ b/docs/man/docker-export.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-export - Export the contents of a filesystem as a tar archive to STDOUT + +# SYNOPSIS +**docker export** +CONTAINER + +# DESCRIPTION +Export the contents of a container's filesystem using the full or shortened +container ID or container name. The output is exported to STDOUT and can be +redirected to a tar file. + +# OPTIONS +There are no available options. 
+ +# EXAMPLES +Export the contents of the container called angry_bell to a tar file +called test.tar: + + # docker export angry_bell > test.tar + # ls *.tar + test.tar + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-history.1.md b/docs/man/docker-history.1.md new file mode 100644 index 00000000..ddb164e5 --- /dev/null +++ b/docs/man/docker-history.1.md @@ -0,0 +1,34 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-history - Show the history of an image + +# SYNOPSIS +**docker history** +[**--no-trunc**[=*false*]] +[**-q**|**--quiet**[=*false*]] + IMAGE + +# DESCRIPTION + +Show the history of when and how an image was created. + +# OPTIONS +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + $ sudo docker history fedora + IMAGE CREATED CREATED BY SIZE + 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB + 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 10 months ago 0 B + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md new file mode 100644 index 00000000..c572ee67 --- /dev/null +++ b/docs/man/docker-images.1.md @@ -0,0 +1,90 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-images - List images + +# SYNOPSIS +**docker images** +[**-a**|**--all**[=*false*]] +[**-f**|**--filter**[=*[]*]] +[**--no-trunc**[=*false*]] +[**-q**|**--quiet**[=*false*]] + [NAME] + +# DESCRIPTION +This command lists the images stored in the local Docker repository. + +By default, intermediate images, used during builds, are not listed. Some of the +output, e.g., image ID, is truncated, for space reasons. However the truncated +image ID, and often the first few characters, are enough to be used in other +Docker commands that use the image ID. The output includes repository, tag, image +ID, date created and the virtual size. + +The title REPOSITORY for the first title may seem confusing. It is essentially +the image name. However, because you can tag a specific image, and multiple tags +(image instances) can be associated with a single name, the name is really a +repository for all tagged images of the same name. For example consider an image +called fedora. It may be tagged with 18, 19, or 20, etc. to manage different +versions. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all images (by default filter out the intermediate image layers). The default is *false*. + +**-f**, **--filter**=[] + Provide filter values (i.e. 'dangling=true') + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + +## Listing the images + +To list the images in a local repository (not the registry) run: + + docker images + +The list will contain the image repository name, a tag for the image, and an +image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, +IMAGE ID, CREATED, and VIRTUAL SIZE. 
+ +To get a verbose list of images which contains all the intermediate images +used in builds use **-a**: + + docker images -a + +## List images dependency tree hierarchy + +To list the images in the local repository (not the registry) in a dependency +tree format, use the **-t** option. + + docker images -t + +This displays a staggered hierarchy tree where the less indented image is +the oldest with dependent image layers branching inward (to the right) on +subsequent lines. The newest or top level image layer is listed last in +any tree branch. + +## List images in GraphViz format + +To display the list in a format consumable by a GraphViz tools run with +**-v**. For example to produce a .png graph file of the hierarchy use: + + docker images --viz | dot -Tpng -o docker.png + +## Listing only the shortened image IDs + +Listing just the shortened image IDs. This can be useful for some automated +tools. + + docker images -q + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-import.1.md b/docs/man/docker-import.1.md new file mode 100644 index 00000000..2d67b8bc --- /dev/null +++ b/docs/man/docker-import.1.md @@ -0,0 +1,43 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. + +# SYNOPSIS +**docker import** +URL|- [REPOSITORY[:TAG]] + +# DESCRIPTION +Create a new filesystem image from the contents of a tarball (`.tar`, +`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Import from a remote location + + # docker import http://example.com/exampleimage.tgz example/imagerepo + +## Import from a local file + +Import to docker via pipe and stdin: + + # cat exampleimage.tgz | docker import - example/imagelocal + +## Import from a local file and tag + +Import to docker via pipe and stdin: + + # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 + +## Import from a local directory + + # tar -c . | docker import - exampleimagedir + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-info.1.md b/docs/man/docker-info.1.md new file mode 100644 index 00000000..bf64a7b5 --- /dev/null +++ b/docs/man/docker-info.1.md @@ -0,0 +1,44 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-info - Display system-wide information + +# SYNOPSIS +**docker info** + + +# DESCRIPTION +This command displays system wide information regarding the Docker installation. +Information displayed includes the number of containers and images, pool name, +data file, metadata file, data space used, total data space, metadata space used +, total metadata space, execution driver, and the kernel version. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# OPTIONS +There are no available options. 
+ +# EXAMPLES + +## Display Docker system information + +Here is a sample output: + + # docker info + Containers: 14 + Images: 52 + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Dirs: 80 + Execution Driver: native-0.2 + Kernel Version: 3.13.0-24-generic + Operating System: Ubuntu 14.04 LTS + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-inspect.1.md b/docs/man/docker-inspect.1.md new file mode 100644 index 00000000..a52d57c9 --- /dev/null +++ b/docs/man/docker-inspect.1.md @@ -0,0 +1,229 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-inspect - Return low-level information on a container or image + +# SYNOPSIS +**docker inspect** +[**-f**|**--format**[=*FORMAT*]] +CONTAINER|IMAGE [CONTAINER|IMAGE...] + +# DESCRIPTION + +This displays all the information available in Docker for a given +container or image. By default, this will render all results in a JSON +array. If a format is specified, the given template will be executed for +each result. + +# OPTIONS +**-f**, **--format**="" + Format the output using the given go template. + +# EXAMPLES + +## Getting information on a container + +To get information on a container use it's ID or instance name: + + #docker inspect 1eb5fabf5a03 + [{ + "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b", + "Created": "2014-04-04T21:33:52.02361335Z", + "Path": "/usr/sbin/nginx", + "Args": [], + "Config": { + "Hostname": "1eb5fabf5a03", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": true, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/usr/sbin/nginx" + ], + "Dns": null, + "DnsSearch": null, + "Image": "summit/nginx", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": { + "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650", + "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650" + } + }, + "State": { + "Running": true, + "Pid": 858, + "ExitCode": 0, + "StartedAt": "2014-04-04T21:33:54.16259207Z", + "FinishedAt": "0001-01-01T00:00:00Z", + "Ghost": false + }, + "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6", + "NetworkSettings": { + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "Gateway": "172.17.42.1", + "Bridge": "docker0", + "PortMapping": null, + "Ports": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + } + }, + "ResolvConfPath": "/etc/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname", + "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts", + "Name": "/ecstatic_ptolemy", + "Driver": "devicemapper", + "ExecDriver": "native-0.1", + "Volumes": {}, + "VolumesRW": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + }, + "Links": null, + "PublishAllPorts": false, + "DriverOptions": { + "lxc": null 
+ }, + "CliAddress": "" + } + +## Getting the IP address of a container instance + +To get the IP address of a container use: + + # docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03 + 172.17.0.2 + +## Listing all port bindings + +One can loop over arrays and maps in the results to produce simple text +output: + + # docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03 + + 80/tcp -> 80 + +## Getting information on an image + +Use an image's ID or name (e.g., repository/name[:tag]) to get information + on it. + + # docker inspect 58394af37342 + [{ + "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9", + "parent": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "created": "2014-02-03T16:10:40.500814677Z", + "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5", + "container_config": { + "Hostname": "88807319f25e", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD fedora-20-dummy.tar.xz in /" + ], + "Dns": null, + "DnsSearch": null, + "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": null + }, + "docker_version": "0.6.3", + "author": "I P Babble \u003clsm5@ipbabble.com\u003e - ./buildcontainers.sh", + "config": { + "Hostname": "88807319f25e", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": null, + "Dns": null, + "DnsSearch": null, + "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": null + }, + "architecture": "x86_64", + "Size": 385520098 + }] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-kill.1.md b/docs/man/docker-kill.1.md new file mode 100644 index 00000000..3c8d59e6 --- /dev/null +++ b/docs/man/docker-kill.1.md @@ -0,0 +1,24 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-kill - Kill a running container using SIGKILL or a specified signal + +# SYNOPSIS +**docker kill** +[**-s**|**--signal**[=*"KILL"*]] + CONTAINER [CONTAINER...] + +# DESCRIPTION + +The main process inside each container specified will be sent SIGKILL, + or any signal specified with option --signal. + +# OPTIONS +**-s**, **--signal**="KILL" + Signal to send to the container + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) + based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-load.1.md b/docs/man/docker-load.1.md new file mode 100644 index 00000000..07dac461 --- /dev/null +++ b/docs/man/docker-load.1.md @@ -0,0 +1,38 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-load - Load an image from a tar archive on STDIN + +# SYNOPSIS +**docker load** +[**-i**|**--input**[=*INPUT*]] + + +# DESCRIPTION + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. + +# OPTIONS +**-i**, **--input**="" + Read from a tar archive file, instead of STDIN + +# EXAMPLES + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ sudo docker load --input fedora.tar + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-login.1.md b/docs/man/docker-login.1.md new file mode 100644 index 00000000..c2693530 --- /dev/null +++ b/docs/man/docker-login.1.md @@ -0,0 +1,38 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-login - Register or log in to a Docker registry server; if no server is specified, "https://index.docker.io/v1/" is the default. + +# SYNOPSIS +**docker login** +[**-e**|**--email**[=*EMAIL*]] +[**-p**|**--password**[=*PASSWORD*]] +[**-u**|**--username**[=*USERNAME*]] + [SERVER] + +# DESCRIPTION +Register or log in to a Docker registry server; if no server is +specified, "https://index.docker.io/v1/" is the default. If you want to +log in to a private registry you can specify this by adding the server name. + +# OPTIONS +**-e**, **--email**="" + Email + +**-p**, **--password**="" + Password + +**-u**, **--username**="" + Username + +# EXAMPLES + +## Log in to a local registry + + # docker login localhost:8080 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-logout.1.md b/docs/man/docker-logout.1.md new file mode 100644 index 00000000..07dcdcbc --- /dev/null +++ b/docs/man/docker-logout.1.md @@ -0,0 +1,27 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logout - Log out from a Docker registry; if no server is specified, "https://index.docker.io/v1/" is the default. + +# SYNOPSIS +**docker logout** +[SERVER] + +# DESCRIPTION +Log the user out from a Docker registry; if no server is +specified, "https://index.docker.io/v1/" is the default. If you want to +log out from a private registry you can specify this by adding the server name. + +# OPTIONS +There are no available options.
+ +# EXAMPLES + +## Log out from a local registry + + # docker logout localhost:8080 + +# HISTORY +June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) +July 2014, updated by Sven Dowideit diff --git a/docs/man/docker-logs.1.md b/docs/man/docker-logs.1.md new file mode 100644 index 00000000..1fbd229d --- /dev/null +++ b/docs/man/docker-logs.1.md @@ -0,0 +1,38 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logs - Fetch the logs of a container + +# SYNOPSIS +**docker logs** +[**-f**|**--follow**[=*false*]] +[**-t**|**--timestamps**[=*false*]] +[**--tail**[=*"all"*]] +CONTAINER + +# DESCRIPTION +The **docker logs** command batch-retrieves whatever logs are present for +a container at the time of execution. This does not guarantee execution +order when combined with a **docker run** (i.e., your run may not have generated +any logs at the time you execute **docker logs**). + +The **docker logs --follow** command combines **docker logs** and +**docker attach**. It will first return all logs from the beginning and +then continue streaming new output from the container’s stdout and stderr. + +# OPTIONS +**-f**, **--follow**=*true*|*false* + Follow log output. The default is *false*. + +**-t**, **--timestamps**=*true*|*false* + Show timestamps. The default is *false*. + +**--tail**="all" + Output the specified number of lines at the end of logs (defaults to all logs) + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/docs/man/docker-pause.1.md b/docs/man/docker-pause.1.md new file mode 100644 index 00000000..7b4b091a --- /dev/null +++ b/docs/man/docker-pause.1.md @@ -0,0 +1,27 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pause - Pause all processes within a container + +# SYNOPSIS +**docker pause** +CONTAINER + +# DESCRIPTION + +The `docker pause` command uses the cgroups freezer to suspend all processes in +a container. Traditionally, when suspending a process, the `SIGSTOP` signal is +used, which is observable by the process being suspended. With the cgroups freezer, +the process is unaware that it is being suspended (and subsequently resumed) and +is unable to capture that fact. + +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for +further details. + +# OPTIONS +There are no available options.
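+ +# EXAMPLES + +As a minimal sketch, assuming a running container named `sleepy` (the name is hypothetical): + + # docker pause sleepy + +The container can later be resumed with **docker unpause**.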
+ +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-port.1.md b/docs/man/docker-port.1.md new file mode 100644 index 00000000..97cc61b7 --- /dev/null +++ b/docs/man/docker-port.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-port - List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT + +# SYNOPSIS +**docker port** CONTAINER [PRIVATE_PORT[/PROTO]] + +# DESCRIPTION +List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT + +# EXAMPLES +You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or +ask for just a specific mapping: + + $ docker ps test + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + $ docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + $ docker port test 7890/tcp + 0.0.0.0:4321 + $ docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + $ docker port test 7890 + 0.0.0.0:4321 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-ps.1.md b/docs/man/docker-ps.1.md new file mode 100644 index 00000000..bf22d87d --- /dev/null +++ b/docs/man/docker-ps.1.md @@ -0,0 +1,76 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-ps - List containers + +# SYNOPSIS +**docker ps** +[**-a**|**--all**[=*false*]] +[**--before**[=*BEFORE*]] +[**-f**|**--filter**[=*[]*]] +[**-l**|**--latest**[=*false*]] +[**-n**[=*-1*]] +[**--no-trunc**[=*false*]] +[**-q**|**--quiet**[=*false*]] +[**-s**|**--size**[=*false*]] +[**--since**[=*SINCE*]] + + +# DESCRIPTION + +List the containers in the local repository. By default this shows only +the running containers. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**--before**="" + Show only containers created before Id or Name, including non-running ones. + +**-f**, **--filter**=[] + Provide filter values. Valid filters: + exited=<int> - containers with exit code of <int> (see the sketch just after this list) + +**-l**, **--latest**=*true*|*false* + Show only the latest created container, including non-running ones. The default is *false*. + +**-n**=-1 + Show the n last created containers, including non-running ones. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only display numeric IDs. The default is *false*. + +**-s**, **--size**=*true*|*false* + Display sizes. The default is *false*. + +**--since**="" + Show only containers created since Id or Name, including non-running ones.
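+ +As a sketch of the **--filter** flag above (the exit code 0 is only an illustration), list the IDs of all containers, running or not, that exited cleanly: + + # docker ps -a -q -f exited=0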
+ +# EXAMPLES +# Display all containers, including non-running + + # docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain + 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell + c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds + 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike + +# Display only IDs of all containers, including non-running + + # docker ps -a -q + a87ecb4f327c + 01946d9d34d8 + c1d3b0166030 + 41d50ecd2f57 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/docs/man/docker-pull.1.md b/docs/man/docker-pull.1.md new file mode 100644 index 00000000..01c664f5 --- /dev/null +++ b/docs/man/docker-pull.1.md @@ -0,0 +1,66 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pull - Pull an image or a repository from the registry + +# SYNOPSIS +**docker pull** +[**-a**|**--all-tags**[=*false*]] +NAME[:TAG] + +# DESCRIPTION + +This command pulls down an image or a repository from the registry. If +there is more than one image for a repository (e.g., fedora) then all +images for that repository name are pulled down including any tags. +It is also possible to specify a non-default registry to pull from. + +# OPTIONS +**-a**, **--all-tags**=*true*|*false* + Download all tagged images in the repository. The default is *false*. + +# EXAMPLES + +# Pull a repository with multiple images +# Note that if the image is previously downloaded then the status would be +# 'Status: Image is up to date for fedora' + + $ sudo docker pull fedora + Pulling repository fedora + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + + Status: Downloaded newer image for fedora + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB + fedora 20 105182bb5e8b 5 days ago 372.7 MB + fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB + fedora latest 105182bb5e8b 5 days ago 372.7 MB + +# Pull an image, manually specifying path to the registry and tag +# Note that if the image is previously downloaded then the status would be +# 'Status: Image is up to date for registry.hub.docker.com/fedora:20' + + $ sudo docker pull registry.hub.docker.com/fedora:20 + Pulling repository fedora + 3f2fed40e4b0: Download complete + 511136ea3c5a: Download complete + fd241224e9cf: Download complete + + Status: Downloaded newer image for registry.hub.docker.com/fedora:20 + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + fedora 20 3f2fed40e4b0 4 days ago 372.7 MB + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/docs/man/docker-push.1.md b/docs/man/docker-push.1.md new file mode 100644 index 00000000..8523cb53 --- /dev/null +++ b/docs/man/docker-push.1.md @@ -0,0 +1,49 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-push - Push an image or a repository to the registry + +# SYNOPSIS +**docker push** +NAME[:TAG] + +# DESCRIPTION +Push an image or a repository to a registry. The default registry is the Docker +Hub located at [hub.docker.com](https://hub.docker.com/). However, the +image can be pushed to another, perhaps private, registry as demonstrated in +the example below. + +# OPTIONS +There are no available options. + +# EXAMPLES + +# Pushing a new image to a registry + +First save the new image by finding the container ID (using **docker ps**) +and then committing it to a new image name: + + # docker commit c16378f943fe rhel-httpd + +Now push the image to the registry using the image ID. In this example the +registry is on a host named registry-host and listening on port 5000. By +default, Docker would push to the `hub.docker.com` registry; to push to the +local registry on registry-host instead, tag the image with the host name or IP +address, and the port of the registry: + + # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + # docker push registry-host:5000/myadmin/rhel-httpd + +Check that this worked by running: + + # docker images + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-restart.1.md b/docs/man/docker-restart.1.md new file mode 100644 index 00000000..2a08caa5 --- /dev/null +++ b/docs/man/docker-restart.1.md @@ -0,0 +1,22 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-restart - Restart a running container + +# SYNOPSIS +**docker restart** +[**-t**|**--time**[=*10*]] + CONTAINER [CONTAINER...] + +# DESCRIPTION +Restart each container listed. + +# OPTIONS +**-t**, **--time**=10 + Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-rm.1.md b/docs/man/docker-rm.1.md new file mode 100644 index 00000000..bae6a7ea --- /dev/null +++ b/docs/man/docker-rm.1.md @@ -0,0 +1,53 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rm - Remove one or more containers + +# SYNOPSIS +**docker rm** +[**-f**|**--force**[=*false*]] +[**-l**|**--link**[=*false*]] +[**-v**|**--volumes**[=*false*]] + CONTAINER [CONTAINER...] + +# DESCRIPTION + +**docker rm** will remove one or more containers from the host node. The +container name or ID can be used. This does not remove images. You cannot +remove a running container unless you use the **-f** option. To see all +containers on a host use the **docker ps -a** command. + +# OPTIONS +**-f**, **--force**=*true*|*false* + Force the removal of a running container (uses SIGKILL). The default is *false*.
+ +**-l**, **--link**=*true*|*false* + Remove the specified link and not the underlying container. The default is *false*. + +**-v**, **--volumes**=*true*|*false* + Remove the volumes associated with the container. The default is *false*. + +# EXAMPLES + +## Removing a container using its ID + +To remove a container using its ID, find the ID either from a **docker ps -a** +command, from the output of the **docker run** command, or retrieve it from the +file used to store it via the **docker run --cidfile** option: + + docker rm abebf7571666 + +## Removing a container using the container name + +The name of the container can be found using the **docker ps -a** +command. Then use that name as follows: + + docker rm hopeful_morse + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/docs/man/docker-rmi.1.md b/docs/man/docker-rmi.1.md new file mode 100644 index 00000000..08d740a3 --- /dev/null +++ b/docs/man/docker-rmi.1.md @@ -0,0 +1,38 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rmi - Remove one or more images + +# SYNOPSIS +**docker rmi** +[**-f**|**--force**[=*false*]] +[**--no-prune**[=*false*]] +IMAGE [IMAGE...] + +# DESCRIPTION + +This will remove one or more images from the host node. This does not +remove images from a registry. You cannot remove an image of a running +container unless you use the **-f** option. To see all images on a host +use the **docker images** command. + +# OPTIONS +**-f**, **--force**=*true*|*false* + Force removal of the image. The default is *false*. + +**--no-prune**=*true*|*false* + Do not delete untagged parents. The default is *false*. + +# EXAMPLES + +## Removing an image + +Here is an example of removing an image: + + docker rmi fedora/httpd + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md new file mode 100644 index 00000000..e3d84674 --- /dev/null +++ b/docs/man/docker-run.1.md @@ -0,0 +1,427 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-run - Run a command in a new container + +# SYNOPSIS +**docker run** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**-c**|**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpuset**[=*CPUSET*]] +[**-d**|**--detach**[=*false*]] +[**--device**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--dns**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**-i**|**--interactive**[=*false*]] +[**--security-opt**[=*[]*]] +[**--link**[=*[]*]] +[**--lxc-conf**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--name**[=*NAME*]] +[**--net**[=*"bridge"*]] +[**-P**|**--publish-all**[=*false*]] +[**-p**|**--publish**[=*[]*]] +[**--privileged**[=*false*]] +[**--restart**[=*POLICY*]] +[**--rm**[=*false*]] +[**--sig-proxy**[=*true*]] +[**-t**|**--tty**[=*false*]] +[**-u**|**--user**[=*USER*]] +[**-v**|**--volume**[=*[]*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] + IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Run a process in a new container.
**docker run** starts a process with its own +file system, its own networking, and its own isolated process tree. The IMAGE +which starts the process may define defaults related to the process that will be +run in the container, the networking to expose, and more, but **docker run** +gives final control to the operator or administrator who starts the container +from the image. For that reason **docker run** has more options than any other +Docker command. + +If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and +all image dependencies, from the repository in the same way as running **docker +pull** IMAGE, before it starts the container from that image. + +# OPTIONS + +**-a**, **--attach**=*stdin*|*stdout*|*stderr* + Attach to stdin, stdout or stderr. In foreground mode (the default when +**-d** is not specified), **docker run** can start the process in the container +and attach the console to the process’s standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. The **-a** option can be set for +each of stdin, stdout, and stderr. + +**--add-host**=*hostname*:*ip* + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. + +**-c**, **--cpu-shares**=0 + CPU shares in relative weight. You can increase the priority of a container +with the -c option. By default, all containers run at the same priority and get +the same proportion of CPU cycles, but you can tell the kernel to give more +shares of CPU time to one or more containers when you start them via **docker +run**. + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cidfile**="" + Write the container ID to the file + +**--cpuset**="" + CPUs in which to allow execution (0-3, 0,1) + +**-d**, **--detach**=*true*|*false* + Detached mode. This runs the container in the background. It outputs the new +container's ID and any error messages. At any time you can run **docker ps** in +the other shell to view a list of the running containers. You can reattach to a +detached container with **docker attach**. If you choose to run a container in +detached mode, then you cannot use the **--rm** option. + + When attached in the tty mode, you can detach from a running container without +stopping the process by pressing the keys CTRL-P CTRL-Q. +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + +**--dns-search**=[] + Set custom DNS search domains + +**--dns**=*IP-address* + Set custom DNS servers. This option can be used to override the DNS +configuration passed to the container. Typically this is necessary when the +host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this +is the case the **--dns** flag is necessary for every run. + +**-e**, **--env**=*environment* + Set environment variables. This option allows you to specify arbitrary +environment variables that are available for the process that will be launched +inside of the container. + + +**--entrypoint**=*command* + This option allows you to overwrite the default entrypoint of the image that +is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND +because it specifies what executable to run when the container starts, but it is +(purposely) more difficult to override.
The ENTRYPOINT gives a container its +default nature or behavior, so that when you set an ENTRYPOINT you can run the +container as if it were that binary, complete with default options, and you can +pass in more options via the COMMAND. But, sometimes an operator may want to run +something else inside the container, so you can override the default ENTRYPOINT +at runtime by using a **--entrypoint** and a string to specify the new +ENTRYPOINT. + +**--env-file**=[] + Read in a line-delimited file of environment variables + +**--expose**=*port* + Expose a port from the container without publishing it to your host. A +container's port can be exposed to other containers in three ways: 1) The +developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) +the operator can use the **--expose** option with **docker run**, or 3) the +container can be started with the **--link** option. + +**-h**, **--hostname**=*hostname* + Sets the container host name that is available inside the container. + +**-i**, **--interactive**=*true*|*false* + When set to true, keep stdin open even if not attached. The default is false. + +**--security-opt**=*secdriver*:*name*:*value* + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + +**--link**=*name*:*alias* + Add link to another container. The format is name:alias. If the operator +uses **--link** when starting the new client container, then the client +container can access the exposed port via a private networking interface. Docker +will set some environment variables in the client container to help indicate +which interface and port to use. + +**--lxc-conf**=[] + (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + +**-m**, **--memory**=*memory-limit* + Allows you to constrain the memory available to a container. If the host +supports swap memory, then the -m memory setting can be larger than physical +RAM. If a limit of 0 is specified, the container's memory is not limited. The +actual limit may be rounded up to a multiple of the operating system's page +size, if it is not already. The memory limit should be formatted as follows: +`<number><optional unit>`, where unit = b, k, m or g. + +**--name**=*name* + Assign a name to the container. The operator can identify a container in +three ways: + + UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) + UUID short identifier (“f78375b1c487”) + Name (“jonah”) + +The UUID identifiers come from the Docker daemon, and if a name is not assigned +to the container with **--name** then the daemon will also generate a random +string name. The name is useful when defining links (see **--link**) (or any +other place you need to identify a container). This works for both background +and foreground Docker containers. + +**--net**="bridge" + Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:<name|id>': reuses another container's network stack + 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-Bus and is therefore considered insecure.
+ +**-P**, **--publish-all**=*true*|*false* + When set to true, publish all exposed ports to the host interfaces. The +default is false. If the operator uses -P (or -p) then Docker will make the +exposed port accessible on the host and the ports will be available to any +client that can reach the host. When using -P, Docker will bind the exposed +ports to a random port on the host between 49153 and 65535. To find the +mapping between the host ports and the exposed ports, use **docker port**. + +**-p**, **--publish**=[] + Publish a container's port to the host (format: ip:hostPort:containerPort | +ip::containerPort | hostPort:containerPort | containerPort) (use **docker port** to see the +actual mapping) + +**--privileged**=*true*|*false* + Give extended privileges to this container. By default, Docker containers are +“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the +Docker container. This is because by default a container is not allowed to +access any devices. A “privileged” container is given access to all devices. + +When the operator executes **docker run --privileged**, Docker will enable access +to all devices on the host as well as set some configuration in AppArmor to +allow the container nearly all the same access to the host as processes running +outside of a container on the host. + + +**--rm**=*true*|*false* + Automatically remove the container when it exits (incompatible with -d). The default is *false*. + +**--sig-proxy**=*true*|*false* + Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. + +**-t**, **--tty**=*true*|*false* + When set to true, Docker can allocate a pseudo-tty and attach to the standard +input of any container. This can be used, for example, to run a throwaway +interactive shell. The default value is false. + +**-u**, **--user**="" + Username or UID + + +**-v**, **--volume**=*volume*[:ro|:rw] + Bind mount a volume to the container. + +The **-v** option can be used one or +more times to add one or more mounts to a container. These mounts can then be +used in other containers using the **--volumes-from** option. + +The volume may be optionally suffixed with :ro or :rw to mount the volumes in +read-only or read-write mode, respectively. By default, the volumes are mounted +read-write. See examples. + +**--volumes-from**=*container-id*[:ro|:rw] + Will mount volumes from the specified container identified by container-id. +Once a volume is mounted in a container it can be shared with other +containers using the **--volumes-from** option when running those other +containers. The volumes can be shared even if the original container with the +mount is not running. + +The container ID may be optionally suffixed with :ro or +:rw to mount the volumes in read-only or read-write mode, respectively. By +default, the volumes are mounted in the same mode (read-write or read-only) as +the reference container. + + +**-w**, **--workdir**=*directory* + Working directory inside the container. The default working directory for +running binaries within a container is the root directory (/). The developer can +set a different default with the Dockerfile WORKDIR instruction. The operator +can override the working directory by using the **-w** option. + + +**IMAGE** + The image name or ID. You can specify a version of an image you'd like to run + the container with by adding image:tag to the command. For example, + `docker run ubuntu:14.04`.
+ + + +**COMMAND** + The command or program to run inside the image. + + +**ARG** + The arguments for the command to be run in the container. + +# EXAMPLES + +## Exposing log messages from the container to the host's log + +If you want messages that are logged in your container to show up in the host's +syslog/journal then you should bind mount /dev/log as follows. + + # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash + +From inside the container you can test this by sending a message to the log. + + (bash)# logger "Hello from my container" + +Then exit and check the journal. + + # exit + + # journalctl -b | grep Hello + +This should list the message sent to logger. + +## Attaching to one or more of STDIN, STDOUT, STDERR + +If you do not specify -a then Docker will attach everything (stdin, stdout, stderr). +You can specify to which of the three standard streams (stdin, stdout, stderr) +you’d like to connect instead, as in: + + # docker run -a stdin -a stdout -i -t fedora /bin/bash + +## Linking Containers + +The link feature allows multiple containers to communicate with each other. For +example, a container whose Dockerfile has exposed port 80 can be run and named +as follows: + + # docker run --name=link-test -d -i -t fedora/httpd + +A second container, in this case called linker, can communicate with the httpd +container, named link-test, by running with the **--link=<name>:<alias>** option: + + # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash + +Now the container linker is linked to container link-test with the alias lt. +Running the **env** command in the linker container shows environment variables + with the LT (alias) context (**LT_**) + + # env + HOSTNAME=668231cb0978 + TERM=xterm + LT_PORT_80_TCP=tcp://172.17.0.3:80 + LT_PORT_80_TCP_PORT=80 + LT_PORT_80_TCP_PROTO=tcp + LT_PORT=tcp://172.17.0.3:80 + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + PWD=/ + LT_NAME=/linker/lt + SHLVL=1 + HOME=/ + LT_PORT_80_TCP_ADDR=172.17.0.3 + _=/usr/bin/env + +When linking two containers Docker will use the exposed ports of the container +to create a secure tunnel for the parent to access. + + +## Mapping Ports for External Usage + +The exposed port of an application can be mapped to a host port using the **-p** +flag. For example, an httpd port 80 can be mapped to the host port 8080 using the +following: + + # docker run -p 8080:80 -d -i -t fedora/httpd + +## Creating and Mounting a Data Volume Container + +Many applications require the sharing of persistent data across several +containers. Docker allows you to create a Data Volume Container that other +containers can mount from. For example, create a named container that contains +directories /var/volume1 and /tmp/volume2. The image will need to contain these +directories so a couple of RUN mkdir instructions might be required for your +fedora-data image: + + # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true + # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash + +Multiple --volumes-from parameters will bring together multiple data volumes from +multiple containers.
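+ +As a sketch, assuming a second data container named `data2` created the same way (both `data2` and the target name `fedora-container3` are illustrative): + + # docker run --volumes-from=data --volumes-from=data2 --name=fedora-container3 -i -t fedora bash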
It is also possible to mount the volumes that came from the +DATA container in yet another container via the fedora-container1 intermediary +container, allowing one to abstract the actual data source from users of that data: + + # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash + +## Mounting External Volumes + +To mount a host directory as a container volume, specify the absolute path to +the directory and the absolute path for the container directory separated by a +colon: + + # docker run -v /var/db:/data1 -i -t fedora bash + +When using SELinux, be aware that the host has no knowledge of container SELinux +policy. Therefore, in the above example, if SELinux policy is enforced, the +`/var/db` directory is not writable to the container. A "Permission Denied" +message will occur, and an avc: message will appear in the host's syslog. + + +To work around this, at the time of writing this man page, the following command +needs to be run in order for the proper SELinux policy type label to be attached +to the host directory: + + # chcon -Rt svirt_sandbox_file_t /var/db + + +Now, writing to the /data1 volume in the container will be allowed and the +changes will also be reflected on the host in /var/db. + +## Using alternative security labeling + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. For example, you can specify the MCS/MLS level, a +requirement for MLS systems. Specifying the level in the following command +allows you to share the same content between containers. + + # docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash + +An MLS example might be: + + # docker run --security-opt label:level:TopSecret -i -t rhel7 bash + +To disable the security labeling for just this container, as opposed to running +with the `--permissive` flag, use the following command: + + # docker run --security-opt label:disable -i -t fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + # docker run --security-opt label:type:svirt_apache_t -i -t centos bash + +Note: + +You would have to write policy defining a `svirt_apache_t` type. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/docs/man/docker-save.1.md b/docs/man/docker-save.1.md new file mode 100644 index 00000000..ea78475b --- /dev/null +++ b/docs/man/docker-save.1.md @@ -0,0 +1,37 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-save - Save an image to a tar archive (streamed to STDOUT by default) + +# SYNOPSIS +**docker save** +[**-o**|**--output**[=*OUTPUT*]] +IMAGE + +# DESCRIPTION +Produces a tarred repository to the standard output stream. Contains all +parent layers, and all tags and versions, or the specified repo:tag. + +Stream to a file instead of STDOUT by using **-o**.
+ +# OPTIONS +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES + +Save all fedora repository images to a fedora-all.tar and save the latest +fedora image to a fedora-latest.tar: + + $ sudo docker save fedora > fedora-all.tar + $ sudo docker save --output=fedora-latest.tar fedora:latest + $ ls -sh fedora-all.tar + 721M fedora-all.tar + $ ls -sh fedora-latest.tar + 367M fedora-latest.tar + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-search.1.md b/docs/man/docker-search.1.md new file mode 100644 index 00000000..3937b870 --- /dev/null +++ b/docs/man/docker-search.1.md @@ -0,0 +1,58 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-search - Search the Docker Hub for images + +# SYNOPSIS +**docker search** +[**--automated**[=*false*]] +[**--no-trunc**[=*false*]] +[**-s**|**--stars**[=*0*]] +TERM + +# DESCRIPTION + +Search an index for an image that matches the term TERM. The table +of images returned displays the name, description (truncated by default), +number of stars awarded, whether the image is official, and whether it +is automated. + +# OPTIONS +**--automated**=*true*|*false* + Only show automated builds. The default is *false*. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-s**, **--stars**=0 + Only display images with at least x stars + +# EXAMPLES + +## Search the registry for ranked images + +Search the registry for the term 'fedora' and only display those images +ranked 3 or higher: + + $ sudo docker search -s 3 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + mattdm/fedora A basic Fedora image corresponding roughly... 50 + fedora (Semi) Official Fedora base image. 38 + mattdm/fedora-small A small Fedora image on which to build. Co... 8 + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + +## Search the registry for automated images + +Search the registry for the term 'fedora' and only display automated images +ranked 1 or higher: + + $ sudo docker search --automated -s 1 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-start.1.md b/docs/man/docker-start.1.md new file mode 100644 index 00000000..e23fd70a --- /dev/null +++ b/docs/man/docker-start.1.md @@ -0,0 +1,27 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-start - Start a stopped container + +# SYNOPSIS +**docker start** +[**-a**|**--attach**[=*false*]] +[**-i**|**--interactive**[=*false*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Start a stopped container. + +# OPTIONS +**-a**, **--attach**=*true*|*false* + Attach container's STDOUT and STDERR and forward all signals to the process. The default is *false*. + +**-i**, **--interactive**=*true*|*false* + Attach container's STDIN. The default is *false*. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-stop.1.md b/docs/man/docker-stop.1.md new file mode 100644 index 00000000..0cc19918 --- /dev/null +++ b/docs/man/docker-stop.1.md @@ -0,0 +1,23 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after a grace period + +# SYNOPSIS +**docker stop** +[**-t**|**--time**[=*10*]] + CONTAINER [CONTAINER...] + +# DESCRIPTION +Stop a running container (send SIGTERM, and then SIGKILL after a + grace period) + +# OPTIONS +**-t**, **--time**=10 + Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-tag.1.md b/docs/man/docker-tag.1.md new file mode 100644 index 00000000..a42ebe77 --- /dev/null +++ b/docs/man/docker-tag.1.md @@ -0,0 +1,59 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-tag - Tag an image into a repository + +# SYNOPSIS +**docker tag** +[**-f**|**--force**[=*false*]] + IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] + +# DESCRIPTION +This will give a new alias to an image in the repository. This refers to the +entire image name including the optional TAG after the ':'. + +# OPTIONS +**-f**, **--force**=*true*|*false* + When set to true, force the alias. The default is *false*. + +**REGISTRYHOST** + The hostname of the registry if required. This may also include the port +separated by a ':' + +**USERNAME** + The username or other qualifying identifier for the image. + +**NAME** + The image name. + +**TAG** + The tag you are assigning to the image. Though this is arbitrary, it is +recommended to be used for a version to distinguish images with the same name. +Note that here TAG is a part of the overall name or "tag". + +# EXAMPLES + +## Giving an image a new alias + +Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and +tagging it into the "fedora" repository with "version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry, you must tag it with the registry hostname and port (if needed). + + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/docs/man/docker-top.1.md b/docs/man/docker-top.1.md new file mode 100644 index 00000000..9781739c --- /dev/null +++ b/docs/man/docker-top.1.md @@ -0,0 +1,31 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-top - Display the running processes of a container + +# SYNOPSIS +**docker top** +CONTAINER [ps OPTIONS] + +# DESCRIPTION + +Look up the running processes of the container. ps-OPTION can be any of the + options you would pass to a Linux ps command. + +# OPTIONS +There are no available options. + +# EXAMPLES + +Run **docker top** with the ps option of -x: + + $ sudo docker top 8601afda2b -x + PID TTY STAT TIME COMMAND + 16623 ?
Ss 0:00 sleep 99999 + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-unpause.1.md b/docs/man/docker-unpause.1.md new file mode 100644 index 00000000..dfce1632 --- /dev/null +++ b/docs/man/docker-unpause.1.md @@ -0,0 +1,24 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-unpause - Unpause all processes within a container + +# SYNOPSIS +**docker unpause** +CONTAINER + +# DESCRIPTION + +The `docker unpause` command uses the cgroups freezer to un-suspend all +processes in a container. + +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for +further details. + +# OPTIONS +There are no available options. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-version.1.md b/docs/man/docker-version.1.md new file mode 100644 index 00000000..9c029b23 --- /dev/null +++ b/docs/man/docker-version.1.md @@ -0,0 +1,15 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-version - Show the Docker version information. + +# SYNOPSIS +**docker version** + + +# OPTIONS +There are no available options. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker-wait.1.md b/docs/man/docker-wait.1.md new file mode 100644 index 00000000..798f6d65 --- /dev/null +++ b/docs/man/docker-wait.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-wait - Block until a container stops, then print its exit code. + +# SYNOPSIS +**docker wait** +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Block until a container stops, then print its exit code. + +# OPTIONS +There are no available options. + +# EXAMPLES + + $ sudo docker run -d fedora sleep 99 + 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 + $ sudo docker wait 079b83f558a2bc + 0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md new file mode 100644 index 00000000..26f5c213 --- /dev/null +++ b/docs/man/docker.1.md @@ -0,0 +1,208 @@ +% DOCKER(1) Docker User Manuals +% William Henry +% APRIL 2014 +# NAME +docker \- Docker image and container command line interface + +# SYNOPSIS +**docker** [OPTIONS] COMMAND [arg...] + +# DESCRIPTION +**docker** has two distinct functions. It is used for starting the Docker +daemon and to run the CLI (i.e., to command the daemon to manage images, +containers, etc.). So **docker** is both a server, as a daemon, and a client +to the daemon, through the CLI. + +To run the Docker daemon you do not specify any of the commands listed below but +must specify the **-d** option. The other options listed below are for the +daemon only. + +The Docker CLI has over 30 commands. The commands are listed below and each has +its own man page which explains usage and arguments. + +To see the man page for a command run **man docker <command>**. + +# OPTIONS +**-D**=*true*|*false* + Enable debug mode. Default is false. + +**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host:port] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
+ +**--api-enable-cors**=*true*|*false* + Enable CORS headers in the remote API. Default is false. + +**-b**="" + Attach containers to a pre\-existing network bridge; use 'none' to disable container networking + +**--bip**="" + Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b + +**-d**=*true*|*false* + Enable daemon mode. Default is false. + +**--dns**="" + Force Docker to use specific DNS servers + +**-g**="" + Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. + + +**--fixed-cidr**="" + IPv4 subnet for fixed IPs (ex: 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) + +**--icc**=*true*|*false* + Enable inter\-container communication. Default is true. + +**--ip**="" + Default IP address to use when binding container ports. Default is `0.0.0.0`. + +**--ip-masq**=*true*|*false* + Enable IP masquerading for bridge's IP range. Default is true. + +**--iptables**=*true*|*false* + Enable Docker's addition of iptables rules. Default is true. + +**--mtu**=VALUE + Set the container network MTU. Default is `1500`. + +**-p**="" + Path to use for daemon PID file. Default is `/var/run/docker.pid` + +**--registry-mirror**=<scheme>://<host> + Prepend a registry mirror to be used for image pulls. May be specified multiple times. + +**-s**="" + Force the Docker runtime to use a specific storage driver. + +**-v**=*true*|*false* + Print version information and quit. Default is false. + +**--selinux-enabled**=*true*|*false* + Enable SELinux support. Default is false. SELinux does not presently support the BTRFS storage driver. + +# COMMANDS +**docker-attach(1)** + Attach to a running container + +**docker-build(1)** + Build an image from a Dockerfile + +**docker-commit(1)** + Create a new image from a container's changes + +**docker-cp(1)** + Copy files/folders from a container's filesystem to the host at path + +**docker-create(1)** + Create a new container + +**docker-diff(1)** + Inspect changes on a container's filesystem + +**docker-events(1)** + Get real-time events from the server + +**docker-exec(1)** + Run a command in a running container + +**docker-export(1)** + Stream the contents of a container as a tar archive + +**docker-history(1)** + Show the history of an image + +**docker-images(1)** + List images + +**docker-import(1)** + Create a new filesystem image from the contents of a tarball + +**docker-info(1)** + Display system-wide information + +**docker-inspect(1)** + Return low-level information on a container + +**docker-kill(1)** + Kill a running container (which includes the wrapper process and everything +inside it) + +**docker-load(1)** + Load an image from a tar archive + +**docker-login(1)** + Register or log in to a Docker registry server + +**docker-logout(1)** + Log the user out of a Docker registry server + +**docker-logs(1)** + Fetch the logs of a container + +**docker-pause(1)** + Pause all processes within a container + +**docker-port(1)** + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + +**docker-ps(1)** + List containers + +**docker-pull(1)** + Pull an image or a repository from a Docker registry server + +**docker-push(1)** + Push an image or a repository to a Docker registry server + +**docker-restart(1)** + Restart a running container + +**docker-rm(1)** + Remove one or more containers + +**docker-rmi(1)** + Remove one or more images + +**docker-run(1)** + Run a command in a new container + +**docker-save(1)** + Save an image to a tar
archive + +**docker-search(1)** + Search for an image in the Docker index + +**docker-start(1)** + Start a stopped container + +**docker-stop(1)** + Stop a running container + +**docker-tag(1)** + Tag an image into a repository + +**docker-top(1)** + Lookup the running processes of a container + +**docker-unpause(1)** + Unpause all processes within a container + +**docker-version(1)** + Show the Docker version information + +**docker-wait(1)** + Block until a container stops, then print its exit code + +# EXAMPLES + +For specific examples please see the man page for the specific Docker command. +For example: + + man docker run + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based + on docker.com source material and internal work. diff --git a/docs/man/md2man-all.sh b/docs/man/md2man-all.sh new file mode 100755 index 00000000..97c65c93 --- /dev/null +++ b/docs/man/md2man-all.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# get into this script's directory +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +[ "$1" = '-q' ] || { + set -x + pwd +} + +for FILE in *.md; do + base="$(basename "$FILE")" + name="${base%.md}" + num="${name##*.}" + if [ -z "$num" -o "$name" = "$num" ]; then + # skip files that aren't of the format xxxx.N.md (like README.md) + continue + fi + mkdir -p "./man${num}" + go-md2man -in "$FILE" -out "./man${num}/${name}" +done diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml new file mode 100644 index 00000000..25f84b5a --- /dev/null +++ b/docs/mkdocs.yml @@ -0,0 +1,150 @@ +site_name: Docker Documentation +#site_url: http://docs.docker.com/ +site_url: / +site_description: Documentation for the fast and lightweight Docker container-based virtualization framework. +site_favicon: img/favicon.png + +dev_addr: '0.0.0.0:8000' + +repo_url: https://github.com/docker/docker/ + +docs_dir: sources + +include_search: true + +use_absolute_urls: true + +# theme: docker +theme_dir: ./theme/mkdocs/ +theme_center_lead: false + +copyright: Copyright © 2014, Docker, Inc.
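+# The google_analytics setting below takes a [tracking ID, domain] pair.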
+google_analytics: ['UA-6096819-11', 'docker.io'] + +pages: + +# Introduction: +- ['index.md', 'About', 'Docker'] +- ['release-notes.md', 'About', 'Release Notes'] +- ['introduction/index.md', '**HIDDEN**'] +- ['introduction/understanding-docker.md', 'About', 'Understanding Docker'] + +# Installation: +- ['installation/index.md', '**HIDDEN**'] +- ['installation/mac.md', 'Installation', 'Mac OS X'] +- ['installation/ubuntulinux.md', 'Installation', 'Ubuntu'] +- ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux'] +- ['installation/oracle.md', 'Installation', 'Oracle Linux'] +- ['installation/centos.md', 'Installation', 'CentOS'] +- ['installation/debian.md', 'Installation', 'Debian'] +- ['installation/gentoolinux.md', 'Installation', 'Gentoo'] +- ['installation/google.md', 'Installation', 'Google Cloud Platform'] +- ['installation/rackspace.md', 'Installation', 'Rackspace Cloud'] +- ['installation/amazon.md', 'Installation', 'Amazon EC2'] +- ['installation/softlayer.md', 'Installation', 'IBM Softlayer'] +- ['installation/archlinux.md', 'Installation', 'Arch Linux'] +- ['installation/frugalware.md', 'Installation', 'FrugalWare'] +- ['installation/fedora.md', 'Installation', 'Fedora'] +- ['installation/openSUSE.md', 'Installation', 'openSUSE'] +- ['installation/cruxlinux.md', 'Installation', 'CRUX Linux'] +- ['installation/windows.md', 'Installation', 'Microsoft Windows'] +- ['installation/binaries.md', 'Installation', 'Binaries'] + +# User Guide: +- ['userguide/index.md', 'User Guide', 'The Docker User Guide' ] +- ['userguide/dockerhub.md', 'User Guide', 'Getting Started with Docker Hub' ] +- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing Applications' ] +- ['userguide/usingdocker.md', 'User Guide', 'Working with Containers' ] +- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker Images' ] +- ['userguide/dockerlinks.md', 'User Guide', 'Linking containers together' ] +- ['userguide/dockervolumes.md', 'User Guide', 'Managing data in containers' ] +- ['userguide/dockerrepos.md', 'User Guide', 'Working with Docker Hub' ] +- ['userguide/level1.md', '**HIDDEN**' ] +- ['userguide/level2.md', '**HIDDEN**' ] + +# Docker Hub docs: +- ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ] +- ['docker-hub/accounts.md', 'Docker Hub', 'Accounts'] +- ['docker-hub/repos.md', 'Docker Hub', 'Repositories'] +- ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds'] +- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repo Guidelines'] + +# Examples: +- ['examples/index.md', '**HIDDEN**'] +- ['examples/nodejs_web_app.md', 'Examples', 'Dockerizing a Node.js web application'] +- ['examples/mongodb.md', 'Examples', 'Dockerizing MongoDB'] +- ['examples/running_redis_service.md', 'Examples', 'Dockerizing a Redis service'] +- ['examples/postgresql_service.md', 'Examples', 'Dockerizing a PostgreSQL service'] +- ['examples/running_riak_service.md', 'Examples', 'Dockerizing a Riak service'] +- ['examples/running_ssh_service.md', 'Examples', 'Dockerizing an SSH service'] +- ['examples/couchdb_data_volumes.md', 'Examples', 'Dockerizing a CouchDB service'] +- ['examples/apt-cacher-ng.md', 'Examples', 'Dockerizing an Apt-Cacher-ng service'] + +# Articles +- ['articles/index.md', '**HIDDEN**'] +- ['articles/basics.md', 'Articles', 'Docker basics'] +- ['articles/networking.md', 'Articles', 'Advanced networking'] +- ['articles/security.md', 'Articles', 'Security'] +- ['articles/https.md', 'Articles', 'Running Docker with HTTPS'] +- ['articles/host_integration.md', 'Articles', 
'Automatically starting containers'] +- ['articles/baseimages.md', 'Articles', 'Creating a base image'] +- ['articles/dockerfile_best-practices.md', 'Articles', 'Best practices for writing Dockerfiles'] +- ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification'] +- ['articles/using_supervisord.md', 'Articles', 'Using Supervisor'] +- ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine'] +- ['articles/puppet.md', 'Articles', 'Using Puppet'] +- ['articles/chef.md', 'Articles', 'Using Chef'] +- ['articles/dsc.md', 'Articles', 'Using PowerShell DSC'] +- ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using ambassador containers'] +- ['articles/runmetrics.md', 'Articles', 'Runtime metrics'] +- ['articles/b2d_volume_resize.md', 'Articles', 'Increasing a Boot2Docker volume'] + +# Reference +- ['reference/index.md', '**HIDDEN**'] +- ['reference/commandline/index.md', '**HIDDEN**'] +- ['reference/commandline/cli.md', 'Reference', 'Command line'] +- ['reference/builder.md', 'Reference', 'Dockerfile'] +- ['faq.md', 'Reference', 'FAQ'] +- ['reference/run.md', 'Reference', 'Run Reference'] +- ['reference/api/index.md', '**HIDDEN**'] +- ['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API'] +- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] +- ['reference/api/registry_api_client_libraries.md', 'Reference', 'Docker Registry API Client Libraries'] +- ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec'] +- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] +- ['reference/api/docker_remote_api_v1.15.md', 'Reference', 'Docker Remote API v1.15'] +- ['reference/api/docker_remote_api_v1.14.md', 'Reference', 'Docker Remote API v1.14'] +- ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13'] +- ['reference/api/docker_remote_api_v1.12.md', 'Reference', 'Docker Remote API v1.12'] +- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11'] +- ['reference/api/docker_remote_api_v1.10.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] +- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] +- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API'] + +- ['jsearch.md', '**HIDDEN**'] + +# - ['static_files/README.md', 'static_files', 'README'] +- ['terms/index.md', '**HIDDEN**'] +- ['terms/layer.md', '**HIDDEN**'] +- ['terms/index.md', '**HIDDEN**'] +- ['terms/registry.md', '**HIDDEN**'] +- ['terms/container.md', '**HIDDEN**'] +- ['terms/repository.md', '**HIDDEN**'] +- ['terms/filesystem.md', '**HIDDEN**'] +- ['terms/image.md', '**HIDDEN**'] + +# Contribute: +- ['contributing/index.md', '**HIDDEN**'] +- ['contributing/contributing.md', 'Contribute', 'Contributing'] +- ['contributing/devenvironment.md', 'Contribute', 'Development 
environment']
diff --git a/docs/release.sh b/docs/release.sh
new file mode 100755
index 00000000..cdb1a94c
--- /dev/null
+++ b/docs/release.sh
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+set -e
+
+set -o pipefail
+
+usage() {
+	cat >&2 <<'EOF'
+To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
+(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
+and set the AWS_S3_BUCKET env var to the name of your bucket.
+
+If you're publishing the current release's documentation, also set `BUILD_ROOT=yes`
+
+make AWS_S3_BUCKET=docs-stage.docker.com docs-release
+
+will then push the documentation site to your s3 bucket.
+EOF
+	exit 1
+}
+
+[ "$AWS_S3_BUCKET" ] || usage
+
+VERSION=$(cat VERSION)
+
+if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then
+	if [ "${VERSION%-dev}" != "$VERSION" ]; then
+		echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
+		exit 1
+	fi
+	cat > ./sources/robots.txt <<'EOF'
+User-agent: *
+Allow: /
+EOF
+
+else
+	cat > ./sources/robots.txt <<'EOF'
+User-agent: *
+Disallow: /
+EOF
+fi
+
+# Strip the patch level: 1.0.2-dev -> v1.0
+MAJOR_MINOR="v${VERSION%.*}"
+export MAJOR_MINOR
+
+export BUCKET=$AWS_S3_BUCKET
+
+export AWS_CONFIG_FILE=$(pwd)/awsconfig
+[ -e "$AWS_CONFIG_FILE" ] || usage
+export AWS_DEFAULT_PROFILE=$BUCKET
+
+echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"
+
+setup_s3() {
+	echo "Create $BUCKET"
+	# Try creating the bucket. Ignore errors (it might already exist).
+	aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true
+	# Check access to the bucket.
+	echo "test $BUCKET exists"
+	aws s3 --profile $BUCKET ls s3://$BUCKET
+	# Make the bucket accessible through website endpoints.
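+	# The website configuration pushed below also installs the RoutingRules
+	# from s3_website.json, which redirect legacy documentation paths to
+	# their current locations.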
+ echo "make $BUCKET accessible as a website" + #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html + s3conf=$(cat s3_website.json | envsubst) + echo + echo $s3conf + echo + aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf" +} + +build_current_documentation() { + mkdocs build +} + +upload_current_documentation() { + src=site/ + dst=s3://$BUCKET$1 + + echo + echo "Uploading $src" + echo " to $dst" + echo + #s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst" + #aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --acl public-read "site/search_content.json" "$dst" + + # a really complicated way to send only the files we want + # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go + # versions.html_fragment + endings=( json txt html xml css js gif png JPG ttf svg woff html_fragment ) + for i in ${endings[@]}; do + include="" + for j in ${endings[@]}; do + if [ "$i" != "$j" ];then + include="$include --exclude *.$j" + fi + done + include="--include *.$i $include" + echo "uploading *.$i" + run="aws s3 sync --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \ + $include \ + --exclude *.text* \ + --exclude *.*~ \ + --exclude *Dockerfile \ + --exclude *.DS_Store \ + --exclude *.psd \ + --exclude *.ai \ + --exclude *.eot \ + --exclude *.otf \ + --exclude *.rej \ + --exclude *.rst \ + --exclude *.orig \ + --exclude *.py \ + $src $dst" + echo "=======================" + #echo "$run" + #echo "=======================" + $run + done +} + +setup_s3 + +# Default to only building the version specific docs so we don't clober the latest by accident with old versions +if [ "$BUILD_ROOT" == "yes" ]; then + echo "Building root documentation" + build_current_documentation + upload_current_documentation +fi + +#build again with /v1.0/ prefix +sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml +echo "Building the /$MAJOR_MINOR/ documentation" +build_current_documentation +upload_current_documentation "/$MAJOR_MINOR/" diff --git a/docs/s3_website.json b/docs/s3_website.json new file mode 100644 index 00000000..224ba816 --- /dev/null +++ b/docs/s3_website.json @@ -0,0 +1,36 @@ +{ + "ErrorDocument": { + "Key": "jsearch/index.html" + }, + "IndexDocument": { + "Suffix": "index.html" + }, + "RoutingRules": [ + { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "jsearch/" } }, + { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-io/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, + { "Condition": { "KeyPrefixEquals": "examples/hello_world/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, + { "Condition": { "KeyPrefixEquals": "examples/python_web_app/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, + { "Condition": { 
"KeyPrefixEquals": "use/working_with_volumes/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockervolumes/" } }, + { "Condition": { "KeyPrefixEquals": "use/working_with_links_names/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, + { "Condition": { "KeyPrefixEquals": "use/workingwithrepository/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerrepos/" } }, + { "Condition": { "KeyPrefixEquals": "use/port_redirection" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, + { "Condition": { "KeyPrefixEquals": "use/networking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/networking/" } }, + { "Condition": { "KeyPrefixEquals": "use/puppet/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/puppet/" } }, + { "Condition": { "KeyPrefixEquals": "use/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } }, + { "Condition": { "KeyPrefixEquals": "use/basics/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/basics/" } }, + { "Condition": { "KeyPrefixEquals": "use/chef/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/chef/" } }, + { "Condition": { "KeyPrefixEquals": "use/host_integration/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/host_integration/" } }, + { "Condition": { "KeyPrefixEquals": "docker-io/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/" } }, + { "Condition": { "KeyPrefixEquals": "examples/cfengine_process_management/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/cfengine_process_management/" } }, + { "Condition": { "KeyPrefixEquals": "examples/https/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/https/" } }, + { "Condition": { "KeyPrefixEquals": "examples/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } }, + { "Condition": { "KeyPrefixEquals": "examples/using_supervisord/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/using_supervisord/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/registry_index_spec/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/hub_registry_spec/" } }, + { "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } } + ] +} + diff --git a/engine/MAINTAINERS b/engine/MAINTAINERS new file mode 100644 index 00000000..aee10c84 --- /dev/null +++ b/engine/MAINTAINERS @@ -0,0 +1 @@ +Solomon Hykes (@shykes) diff --git a/engine/engine.go b/engine/engine.go new file mode 100644 index 00000000..5c708d40 --- /dev/null +++ b/engine/engine.go @@ -0,0 +1,260 @@ +package engine + +import ( + "bufio" + "fmt" + "io" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/utils" +) + +// Installer is a standard interface for objects which can "install" themselves +// on an engine by registering handlers. +// This can be used as an entrypoint for external plugins etc. 
+type Installer interface {
+	Install(*Engine) error
+}
+
+type Handler func(*Job) Status
+
+var globalHandlers map[string]Handler
+
+func init() {
+	globalHandlers = make(map[string]Handler)
+}
+
+func Register(name string, handler Handler) error {
+	_, exists := globalHandlers[name]
+	if exists {
+		return fmt.Errorf("Can't overwrite global handler for command %s", name)
+	}
+	globalHandlers[name] = handler
+	return nil
+}
+
+func unregister(name string) {
+	delete(globalHandlers, name)
+}
+
+// The Engine is the core of Docker.
+// It acts as a store for *containers*, and allows manipulation of these
+// containers by executing *jobs*.
+type Engine struct {
+	handlers   map[string]Handler
+	catchall   Handler
+	hack       Hack // data for temporary hackery (see hack.go)
+	id         string
+	Stdout     io.Writer
+	Stderr     io.Writer
+	Stdin      io.Reader
+	Logging    bool
+	tasks      sync.WaitGroup
+	l          sync.RWMutex // lock for shutdown
+	shutdown   bool
+	onShutdown []func() // shutdown handlers
+}
+
+func (eng *Engine) Register(name string, handler Handler) error {
+	_, exists := eng.handlers[name]
+	if exists {
+		return fmt.Errorf("Can't overwrite handler for command %s", name)
+	}
+	eng.handlers[name] = handler
+	return nil
+}
+
+func (eng *Engine) RegisterCatchall(catchall Handler) {
+	eng.catchall = catchall
+}
+
+// New initializes a new engine.
+func New() *Engine {
+	eng := &Engine{
+		handlers: make(map[string]Handler),
+		id:       utils.RandomString(),
+		Stdout:   os.Stdout,
+		Stderr:   os.Stderr,
+		Stdin:    os.Stdin,
+		Logging:  true,
+	}
+	eng.Register("commands", func(job *Job) Status {
+		for _, name := range eng.commands() {
+			job.Printf("%s\n", name)
+		}
+		return StatusOK
+	})
+	// Copy existing global handlers
+	for k, v := range globalHandlers {
+		eng.handlers[k] = v
+	}
+	return eng
+}
+
+func (eng *Engine) String() string {
+	return eng.id[:8]
+}
+
+// commands returns a list of all currently registered commands,
+// sorted alphabetically.
+func (eng *Engine) commands() []string {
+	names := make([]string, 0, len(eng.handlers))
+	for name := range eng.handlers {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	return names
+}
+
+// Job creates a new job which can later be executed.
+// This function mimics `Command` from the standard os/exec package.
+func (eng *Engine) Job(name string, args ...string) *Job {
+	job := &Job{
+		Eng:     eng,
+		Name:    name,
+		Args:    args,
+		Stdin:   NewInput(),
+		Stdout:  NewOutput(),
+		Stderr:  NewOutput(),
+		env:     &Env{},
+		closeIO: true,
+	}
+	if eng.Logging {
+		job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr))
+	}
+
+	// A handler registered under the specific name shadows the catchall.
+	if handler, exists := eng.handlers[name]; exists {
+		job.handler = handler
+	} else if eng.catchall != nil && name != "" {
+		// Empty job names are illegal, catchall or not.
+		job.handler = eng.catchall
+	}
+	return job
+}
+
+// OnShutdown registers a new callback to be called by Shutdown.
+// This is typically used by services to perform cleanup.
+func (eng *Engine) OnShutdown(h func()) {
+	eng.l.Lock()
+	eng.onShutdown = append(eng.onShutdown, h)
+	eng.l.Unlock()
+}
+
+// Shutdown permanently shuts down eng as follows:
+// - It refuses all new jobs, permanently.
+// - It waits up to 5 seconds for all active jobs to complete.
+// - It calls all shutdown handlers concurrently (if any), waiting up to
+//   10 seconds for them to complete.
+// - It returns once all handlers complete, or after 15 seconds in total,
+//   whichever happens first.
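+//
+// A typical use (sketch):
+//
+//	eng.OnShutdown(func() {
+//		// flush state, close listeners, etc.
+//	})
+//	eng.Shutdown()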
+func (eng *Engine) Shutdown() { + eng.l.Lock() + if eng.shutdown { + eng.l.Unlock() + return + } + eng.shutdown = true + eng.l.Unlock() + // We don't need to protect the rest with a lock, to allow + // for other calls to immediately fail with "shutdown" instead + // of hanging for 15 seconds. + // This requires all concurrent calls to check for shutdown, otherwise + // it might cause a race. + + // Wait for all jobs to complete. + // Timeout after 5 seconds. + tasksDone := make(chan struct{}) + go func() { + eng.tasks.Wait() + close(tasksDone) + }() + select { + case <-time.After(time.Second * 5): + case <-tasksDone: + } + + // Call shutdown handlers, if any. + // Timeout after 10 seconds. + var wg sync.WaitGroup + for _, h := range eng.onShutdown { + wg.Add(1) + go func(h func()) { + defer wg.Done() + h() + }(h) + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-time.After(time.Second * 10): + case <-done: + } + return +} + +// IsShutdown returns true if the engine is in the process +// of shutting down, or already shut down. +// Otherwise it returns false. +func (eng *Engine) IsShutdown() bool { + eng.l.RLock() + defer eng.l.RUnlock() + return eng.shutdown +} + +// ParseJob creates a new job from a text description using a shell-like syntax. +// +// The following syntax is used to parse `input`: +// +// * Words are separated using standard whitespaces as separators. +// * Quotes and backslashes are not interpreted. +// * Words of the form 'KEY=[VALUE]' are added to the job environment. +// * All other words are added to the job arguments. +// +// For example: +// +// job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world") +// +// The resulting job will have: +// job.Args={"echo", "hello", "world"} +// job.Env={"VERBOSE":"1", "TEST":"true"} +// +func (eng *Engine) ParseJob(input string) (*Job, error) { + // FIXME: use a full-featured command parser + scanner := bufio.NewScanner(strings.NewReader(input)) + scanner.Split(bufio.ScanWords) + var ( + cmd []string + env Env + ) + for scanner.Scan() { + word := scanner.Text() + kv := strings.SplitN(word, "=", 2) + if len(kv) == 2 { + env.Set(kv[0], kv[1]) + } else { + cmd = append(cmd, word) + } + } + if len(cmd) == 0 { + return nil, fmt.Errorf("empty command: '%s'", input) + } + job := eng.Job(cmd[0], cmd[1:]...) + job.Env().Init(&env) + return job, nil +} + +func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { + if !eng.Logging { + return 0, nil + } + prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) + return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) 
+}
diff --git a/engine/engine_test.go b/engine/engine_test.go
new file mode 100644
index 00000000..92f37572
--- /dev/null
+++ b/engine/engine_test.go
@@ -0,0 +1,162 @@
+package engine
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+func TestRegister(t *testing.T) {
+	if err := Register("dummy1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := Register("dummy1", nil); err == nil {
+		t.Fatalf("Expecting error, got none")
+	}
+	// Register is global so let's cleanup to avoid conflicts
+	defer unregister("dummy1")
+
+	eng := New()
+
+	// Should fail because global handlers are copied
+	// at engine creation
+	if err := eng.Register("dummy1", nil); err == nil {
+		t.Fatalf("Expecting error, got none")
+	}
+
+	if err := eng.Register("dummy2", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := eng.Register("dummy2", nil); err == nil {
+		t.Fatalf("Expecting error, got none")
+	}
+	defer unregister("dummy2")
+}
+
+func TestJob(t *testing.T) {
+	eng := New()
+	job1 := eng.Job("dummy1", "--level=awesome")
+
+	if job1.handler != nil {
+		t.Fatalf("job1.handler should be empty")
+	}
+
+	h := func(j *Job) Status {
+		j.Printf("%s\n", j.Name)
+		return 42
+	}
+
+	eng.Register("dummy2", h)
+	defer unregister("dummy2")
+	job2 := eng.Job("dummy2", "--level=awesome")
+
+	if job2.handler == nil {
+		t.Fatalf("job2.handler shouldn't be nil")
+	}
+
+	if job2.handler(job2) != 42 {
+		t.Fatalf("handler dummy2 did not return the expected status in job2")
+	}
+}
+
+func TestEngineShutdown(t *testing.T) {
+	eng := New()
+	if eng.IsShutdown() {
+		t.Fatalf("Engine should not show as shutdown")
+	}
+	eng.Shutdown()
+	if !eng.IsShutdown() {
+		t.Fatalf("Engine should show as shutdown")
+	}
+}
+
+func TestEngineCommands(t *testing.T) {
+	eng := New()
+	handler := func(job *Job) Status { return StatusOK }
+	eng.Register("foo", handler)
+	eng.Register("bar", handler)
+	eng.Register("echo", handler)
+	eng.Register("die", handler)
+	var output bytes.Buffer
+	commands := eng.Job("commands")
+	commands.Stdout.Add(&output)
+	commands.Run()
+	expected := "bar\ncommands\ndie\necho\nfoo\n"
+	if result := output.String(); result != expected {
+		t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result)
+	}
+}
+
+func TestEngineString(t *testing.T) {
+	eng1 := New()
+	eng2 := New()
+	s1 := eng1.String()
+	s2 := eng2.String()
+	if s1 == s2 {
+		t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
+	}
+}
+
+func TestEngineLogf(t *testing.T) {
+	eng := New()
+	input := "Test log line"
+	if n, err := eng.Logf("%s\n", input); err != nil {
+		t.Fatal(err)
+	} else if n < len(input) {
+		t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n)
+	}
+}
+
+func TestParseJob(t *testing.T) {
+	eng := New()
+	// Verify that the resulting job calls to the right place
+	var called bool
+	eng.Register("echo", func(job *Job) Status {
+		called = true
+		return StatusOK
+	})
+	input := "echo DEBUG=1 hello world VERBOSITY=42"
+	job, err := eng.ParseJob(input)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if job.Name != "echo" {
+		t.Fatalf("Invalid job name: %v", job.Name)
+	}
+	if strings.Join(job.Args, ":::") != "hello:::world" {
+		t.Fatalf("Invalid job args: %v", job.Args)
+	}
+	if job.Env().Get("DEBUG") != "1" {
+		t.Fatalf("Invalid job env: %v", job.Env)
+	}
+	if job.Env().Get("VERBOSITY") != "42" {
+		t.Fatalf("Invalid job env: %v", job.Env)
+	}
+	if len(job.Env().Map()) != 2 {
+		t.Fatalf("Invalid job env: %v", job.Env)
+	}
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	if
!called {
+		t.Fatalf("Job was not called")
+	}
+}
+
+func TestCatchallEmptyName(t *testing.T) {
+	eng := New()
+	var called bool
+	eng.RegisterCatchall(func(job *Job) Status {
+		called = true
+		return StatusOK
+	})
+	err := eng.Job("").Run()
+	if err == nil {
+		t.Fatalf("Engine.Job(\"\").Run() should return an error")
+	}
+	if called {
+		t.Fatalf("Engine.Job(\"\").Run() should not have invoked the catchall")
+	}
+}
diff --git a/engine/env.go b/engine/env.go
new file mode 100644
index 00000000..a16dc35c
--- /dev/null
+++ b/engine/env.go
@@ -0,0 +1,297 @@
+package engine
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+type Env []string
+
+// Get returns the last value associated with the given key. If there are no
+// values associated with the key, Get returns the empty string.
+func (env *Env) Get(key string) (value string) {
+	// not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315
+	for _, kv := range *env {
+		if strings.Index(kv, "=") == -1 {
+			continue
+		}
+		parts := strings.SplitN(kv, "=", 2)
+		if parts[0] != key {
+			continue
+		}
+		if len(parts) < 2 {
+			value = ""
+		} else {
+			value = parts[1]
+		}
+	}
+	return
+}
+
+func (env *Env) Exists(key string) bool {
+	_, exists := env.Map()[key]
+	return exists
+}
+
+// Len returns the number of keys in the environment.
+// Note that len(env) might be different from env.Len(),
+// because the same key might be set multiple times.
+func (env *Env) Len() int {
+	return len(env.Map())
+}
+
+func (env *Env) Init(src *Env) {
+	(*env) = make([]string, 0, len(*src))
+	for _, val := range *src {
+		(*env) = append((*env), val)
+	}
+}
+
+func (env *Env) GetBool(key string) (value bool) {
+	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+		return false
+	}
+	return true
+}
+
+func (env *Env) SetBool(key string, value bool) {
+	if value {
+		env.Set(key, "1")
+	} else {
+		env.Set(key, "0")
+	}
+}
+
+func (env *Env) GetInt(key string) int {
+	return int(env.GetInt64(key))
+}
+
+func (env *Env) GetInt64(key string) int64 {
+	s := strings.Trim(env.Get(key), " \t")
+	val, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0
+	}
+	return val
+}
+
+func (env *Env) SetInt(key string, value int) {
+	env.Set(key, fmt.Sprintf("%d", value))
+}
+
+func (env *Env) SetInt64(key string, value int64) {
+	env.Set(key, fmt.Sprintf("%d", value))
+}
+
+// GetList returns nil if the key is not found.
+func (env *Env) GetList(key string) []string {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	l := make([]string, 0, 1)
+	if err := json.Unmarshal([]byte(sval), &l); err != nil {
+		l = append(l, sval)
+	}
+	return l
+}
+
+func (env *Env) GetSubEnv(key string) *Env {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	buf := bytes.NewBufferString(sval)
+	var sub Env
+	if err := sub.Decode(buf); err != nil {
+		return nil
+	}
+	return &sub
+}
+
+func (env *Env) SetSubEnv(key string, sub *Env) error {
+	var buf bytes.Buffer
+	if err := sub.Encode(&buf); err != nil {
+		return err
+	}
+	env.Set(key, buf.String())
+	return nil
+}
+
+func (env *Env) GetJson(key string, iface interface{}) error {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	return json.Unmarshal([]byte(sval), iface)
+}
+
+func (env *Env) SetJson(key string, value interface{}) error {
+	sval, err := json.Marshal(value)
+	if err != nil {
+		return err
+	}
+	env.Set(key, string(sval))
+	return nil
+}
+
+func (env *Env)
SetList(key string, value []string) error {
+	return env.SetJson(key, value)
+}
+
+func (env *Env) Set(key, value string) {
+	*env = append(*env, key+"="+value)
+}
+
+func NewDecoder(src io.Reader) *Decoder {
+	return &Decoder{
+		json.NewDecoder(src),
+	}
+}
+
+type Decoder struct {
+	*json.Decoder
+}
+
+func (decoder *Decoder) Decode() (*Env, error) {
+	m := make(map[string]interface{})
+	if err := decoder.Decoder.Decode(&m); err != nil {
+		return nil, err
+	}
+	env := &Env{}
+	for key, value := range m {
+		env.SetAuto(key, value)
+	}
+	return env, nil
+}
+
+// Decode decodes `src` as a json dictionary, and adds
+// each decoded key-value pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error
+// is returned.
+func (env *Env) Decode(src io.Reader) error {
+	m := make(map[string]interface{})
+	if err := json.NewDecoder(src).Decode(&m); err != nil {
+		return err
+	}
+	for k, v := range m {
+		env.SetAuto(k, v)
+	}
+	return nil
+}
+
+func (env *Env) SetAuto(k string, v interface{}) {
+	// Issue 7941 - if the value in the incoming JSON is null then treat it
+	// as if they never specified the property at all.
+	if v == nil {
+		return
+	}
+
+	// FIXME: we fix-convert float values to int, because
+	// encoding/json decodes integers to float64, but cannot encode them back.
+	// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
+	if fval, ok := v.(float64); ok {
+		env.SetInt64(k, int64(fval))
+	} else if sval, ok := v.(string); ok {
+		env.Set(k, sval)
+	} else if val, err := json.Marshal(v); err == nil {
+		env.Set(k, string(val))
+	} else {
+		env.Set(k, fmt.Sprintf("%v", v))
+	}
+}
+
+func changeFloats(v interface{}) interface{} {
+	switch v := v.(type) {
+	case float64:
+		return int(v)
+	case map[string]interface{}:
+		for key, val := range v {
+			v[key] = changeFloats(val)
+		}
+	case []interface{}:
+		for idx, val := range v {
+			v[idx] = changeFloats(val)
+		}
+	}
+	return v
+}
+
+func (env *Env) Encode(dst io.Writer) error {
+	m := make(map[string]interface{})
+	for k, v := range env.Map() {
+		var val interface{}
+		if err := json.Unmarshal([]byte(v), &val); err == nil {
+			// FIXME: we fix-convert float values to int, because
+			// encoding/json decodes integers to float64, but cannot encode them back.
+			// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
+			m[k] = changeFloats(val)
+		} else {
+			m[k] = v
+		}
+	}
+	if err := json.NewEncoder(dst).Encode(&m); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (env *Env) WriteTo(dst io.Writer) (n int64, err error) {
+	// FIXME: return the number of bytes written to respect io.WriterTo
+	return 0, env.Encode(dst)
+}
+
+func (env *Env) Import(src interface{}) (err error) {
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("ImportEnv: %s", err)
+		}
+	}()
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(src); err != nil {
+		return err
+	}
+	if err := env.Decode(&buf); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (env *Env) Map() map[string]string {
+	m := make(map[string]string)
+	for _, kv := range *env {
+		parts := strings.SplitN(kv, "=", 2)
+		m[parts[0]] = parts[1]
+	}
+	return m
+}
+
+// MultiMap returns a representation of env as a
+// map of string arrays, keyed by string.
+// This is the same structure as http headers for example,
+// which allow each key to have multiple values.
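+//
+// A small sketch of the shape (values hypothetical):
+//
+//	env := &Env{}
+//	env.Set("foo", "a")
+//	env.Set("foo", "b")
+//	env.MultiMap() // map[string][]string{"foo": {"a", "b"}}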
+func (env *Env) MultiMap() map[string][]string { + m := make(map[string][]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = append(m[parts[0]], parts[1]) + } + return m +} + +// InitMultiMap removes all values in env, then initializes +// new values from the contents of m. +func (env *Env) InitMultiMap(m map[string][]string) { + (*env) = make([]string, 0, len(m)) + for k, vals := range m { + for _, v := range vals { + env.Set(k, v) + } + } +} diff --git a/engine/env_test.go b/engine/env_test.go new file mode 100644 index 00000000..b0caca9c --- /dev/null +++ b/engine/env_test.go @@ -0,0 +1,324 @@ +package engine + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/docker/docker/pkg/testutils" +) + +func TestEnvLenZero(t *testing.T) { + env := &Env{} + if env.Len() != 0 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenNotZero(t *testing.T) { + env := &Env{} + env.Set("foo", "bar") + env.Set("ga", "bu") + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "a=b", + } + // len(env) != env.Len() + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvGetDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "foo=bif", + } + expected := "bif" + if v := env.Get("foo"); v != expected { + t.Fatalf("expect %q, got %q", expected, v) + } +} + +func TestNewJob(t *testing.T) { + job := mkJob(t, "dummy", "--level=awesome") + if job.Name != "dummy" { + t.Fatalf("Wrong job name: %s", job.Name) + } + if len(job.Args) != 1 { + t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) + } + if job.Args[0] != "--level=awesome" { + t.Fatalf("Wrong job arguments: %s", job.Args[0]) + } +} + +func TestSetenv(t *testing.T) { + job := mkJob(t, "dummy") + job.Setenv("foo", "bar") + if val := job.Getenv("foo"); val != "bar" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } + + job.Setenv("bar", "") + if val := job.Getenv("bar"); val != "" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } + if val := job.Getenv("nonexistent"); val != "" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } +} + +func TestSetenvBool(t *testing.T) { + job := mkJob(t, "dummy") + job.SetenvBool("foo", true) + if val := job.GetenvBool("foo"); !val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } + + job.SetenvBool("bar", false) + if val := job.GetenvBool("bar"); val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } + + if val := job.GetenvBool("nonexistent"); val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } +} + +func TestSetenvInt(t *testing.T) { + job := mkJob(t, "dummy") + + job.SetenvInt("foo", -42) + if val := job.GetenvInt("foo"); val != -42 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } + + job.SetenvInt("bar", 42) + if val := job.GetenvInt("bar"); val != 42 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } + if val := job.GetenvInt("nonexistent"); val != 0 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } +} + +func TestSetenvList(t *testing.T) { + job := mkJob(t, "dummy") + + job.SetenvList("foo", []string{"bar"}) + if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } + + job.SetenvList("bar", nil) + if val := job.GetenvList("bar"); val != nil { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } + if val := job.GetenvList("nonexistent"); val != nil { + 
t.Fatalf("GetenvList returns incorrect value: %v", val) + } +} + +func TestEnviron(t *testing.T) { + job := mkJob(t, "dummy") + job.Setenv("foo", "bar") + val, exists := job.Environ()["foo"] + if !exists { + t.Fatalf("foo not found in the environ") + } + if val != "bar" { + t.Fatalf("bar not found in the environ") + } +} + +func TestMultiMap(t *testing.T) { + e := &Env{} + e.Set("foo", "bar") + e.Set("bar", "baz") + e.Set("hello", "world") + m := e.MultiMap() + e2 := &Env{} + e2.Set("old_key", "something something something") + e2.InitMultiMap(m) + if v := e2.Get("old_key"); v != "" { + t.Fatalf("%#v", v) + } + if v := e2.Get("bar"); v != "baz" { + t.Fatalf("%#v", v) + } + if v := e2.Get("hello"); v != "world" { + t.Fatalf("%#v", v) + } +} + +func testMap(l int) [][2]string { + res := make([][2]string, l) + for i := 0; i < l; i++ { + t := [2]string{testutils.RandomString(5), testutils.RandomString(20)} + res[i] = t + } + return res +} + +func BenchmarkSet(b *testing.B) { + fix := testMap(100) + b.ResetTimer() + for i := 0; i < b.N; i++ { + env := &Env{} + for _, kv := range fix { + env.Set(kv[0], kv[1]) + } + } +} + +func BenchmarkSetJson(b *testing.B) { + fix := testMap(100) + type X struct { + f string + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + env := &Env{} + for _, kv := range fix { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkGet(b *testing.B) { + fix := testMap(100) + env := &Env{} + for _, kv := range fix { + env.Set(kv[0], kv[1]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, kv := range fix { + env.Get(kv[0]) + } + } +} + +func BenchmarkGetJson(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + for _, kv := range fix { + env.SetJson(kv[0], X{kv[1]}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, kv := range fix { + if err := env.GetJson(kv[0], &X{}); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkEncode(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + // half a json + for i, kv := range fix { + if i%2 != 0 { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + continue + } + env.Set(kv[0], kv[1]) + } + var writer bytes.Buffer + b.ResetTimer() + for i := 0; i < b.N; i++ { + env.Encode(&writer) + writer.Reset() + } +} + +func BenchmarkDecode(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + // half a json + for i, kv := range fix { + if i%2 != 0 { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + continue + } + env.Set(kv[0], kv[1]) + } + var writer bytes.Buffer + env.Encode(&writer) + denv := &Env{} + reader := bytes.NewReader(writer.Bytes()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := denv.Decode(reader) + if err != nil { + b.Fatal(err) + } + reader.Seek(0, 0) + } +} + +func TestLongNumbers(t *testing.T) { + type T struct { + TestNum int64 + } + v := T{67108864} + var buf bytes.Buffer + e := &Env{} + e.SetJson("Test", v) + if err := e.Encode(&buf); err != nil { + t.Fatal(err) + } + res := make(map[string]T) + if err := json.Unmarshal(buf.Bytes(), &res); err != nil { + t.Fatal(err) + } + if res["Test"].TestNum != v.TestNum { + t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) + } +} + +func TestLongNumbersArray(t *testing.T) { + type T struct { + TestNum []int64 + } + v := T{[]int64{67108864}} + var buf bytes.Buffer + e := &Env{} + e.SetJson("Test", v) + if err := e.Encode(&buf); err != 
nil {
+		t.Fatal(err)
+	}
+	res := make(map[string]T)
+	if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
+		t.Fatal(err)
+	}
+	if res["Test"].TestNum[0] != v.TestNum[0] {
+		t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
+	}
+}
diff --git a/engine/hack.go b/engine/hack.go
new file mode 100644
index 00000000..be4fadbe
--- /dev/null
+++ b/engine/hack.go
@@ -0,0 +1,21 @@
+package engine
+
+type Hack map[string]interface{}
+
+func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
+	if eng.hack == nil {
+		return nil
+	}
+	val, exists := eng.hack[key]
+	if !exists {
+		return nil
+	}
+	return val
+}
+
+func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
+	if eng.hack == nil {
+		eng.hack = make(Hack)
+	}
+	eng.hack[key] = val
+}
diff --git a/engine/helpers_test.go b/engine/helpers_test.go
new file mode 100644
index 00000000..cfa11da7
--- /dev/null
+++ b/engine/helpers_test.go
@@ -0,0 +1,11 @@
+package engine
+
+import (
+	"testing"
+)
+
+var globalTestID string
+
+func mkJob(t *testing.T, name string, args ...string) *Job {
+	return New().Job(name, args...)
+}
diff --git a/engine/http.go b/engine/http.go
new file mode 100644
index 00000000..7e4dcd7b
--- /dev/null
+++ b/engine/http.go
@@ -0,0 +1,42 @@
+package engine
+
+import (
+	"net/http"
+	"path"
+)
+
+// ServeHTTP executes a job as specified by the http request `r`, and sends the
+// result as an http response.
+// This method allows an Engine instance to be used as a standard http.Handler.
+//
+// Note that the protocol used in this method is a convenience wrapper and is not the canonical
+// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
+// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
+// once data has been written to the body, which makes it inconvenient to return metadata such
+// as the exit status.
+//
+func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	var (
+		jobName         = path.Base(r.URL.Path)
+		jobArgs, exists = r.URL.Query()["a"]
+	)
+	if !exists {
+		jobArgs = []string{}
+	}
+	w.Header().Set("Job-Name", jobName)
+	for _, arg := range jobArgs {
+		w.Header().Add("Job-Args", arg)
+	}
+	job := eng.Job(jobName, jobArgs...)
+	job.Stdout.Add(w)
+	job.Stderr.Add(w)
+	// FIXME: distinguish job status from engine error in Run().
+	// The former should be passed as a special header, the latter
+	// should cause a 500 status.
+	w.WriteHeader(http.StatusOK)
+	// The exit status cannot be sent reliably with HTTP1, because headers
+	// can only be sent before the body.
+	// (we could possibly use http footers via chunked encoding, but I couldn't find
+	// how to use them in net/http)
+	job.Run()
+}
diff --git a/engine/job.go b/engine/job.go
new file mode 100644
index 00000000..d032ff02
--- /dev/null
+++ b/engine/job.go
@@ -0,0 +1,238 @@
+package engine
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+)
+
+// A job is the fundamental unit of work in the docker engine.
+// Everything docker can do should eventually be exposed as a job.
+// For example: execute a process in a container, create a new container,
+// download an archive from the internet, serve the http api, etc.
+//
+// The job API is designed after unix processes: a job has a name, arguments,
+// environment variables, standard streams for input, output and error, and
+// an exit status which can indicate success (0) or error (anything else).
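+//
+// A minimal sketch of defining and running a job (names hypothetical):
+//
+//	eng := New()
+//	eng.Register("greet", func(job *Job) Status {
+//		job.Printf("hello %s\n", job.Args[0])
+//		return StatusOK
+//	})
+//	if err := eng.Job("greet", "world").Run(); err != nil {
+//		// non-zero status or engine shutdown
+//	}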
+//
+// For status, 0 indicates success, and any other integer indicates an error.
+// This allows for richer error reporting.
+//
+type Job struct {
+	Eng     *Engine
+	Name    string
+	Args    []string
+	env     *Env
+	Stdout  *Output
+	Stderr  *Output
+	Stdin   *Input
+	handler Handler
+	status  Status
+	end     time.Time
+	closeIO bool
+}
+
+type Status int
+
+const (
+	StatusOK       Status = 0
+	StatusErr      Status = 1
+	StatusNotFound Status = 127
+)
+
+// Run executes the job and blocks until the job completes.
+// If the job returns a failure status, an error is returned
+// which includes the status.
+func (job *Job) Run() error {
+	if job.Eng.IsShutdown() {
+		return fmt.Errorf("engine is shutdown")
+	}
+	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
+	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
+	// every time the daemon is cleanly restarted.
+	// The permanent fix is to implement Job.Stop and Job.OnStop so that
+	// ServeApi can cooperate and terminate cleanly.
+	if job.Name != "serveapi" {
+		job.Eng.l.Lock()
+		job.Eng.tasks.Add(1)
+		job.Eng.l.Unlock()
+		defer job.Eng.tasks.Done()
+	}
+	// FIXME: make this thread-safe
+	// FIXME: implement wait
+	if !job.end.IsZero() {
+		return fmt.Errorf("%s: job has already completed", job.Name)
+	}
+	// Log beginning and end of the job
+	job.Eng.Logf("+job %s", job.CallString())
+	defer func() {
+		job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
+	}()
+	var errorMessage = bytes.NewBuffer(nil)
+	job.Stderr.Add(errorMessage)
+	if job.handler == nil {
+		job.Errorf("%s: command not found", job.Name)
+		job.status = StatusNotFound
+	} else {
+		job.status = job.handler(job)
+		job.end = time.Now()
+	}
+	if job.closeIO {
+		// Wait for all background tasks to complete
+		if err := job.Stdout.Close(); err != nil {
+			return err
+		}
+		if err := job.Stderr.Close(); err != nil {
+			return err
+		}
+		if err := job.Stdin.Close(); err != nil {
+			return err
+		}
+	}
+	if job.status != 0 {
+		return fmt.Errorf("%s", Tail(errorMessage, 1))
+	}
+
+	return nil
+}
+
+func (job *Job) CallString() string {
+	return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
+}
+
+func (job *Job) StatusString() string {
+	// If the job hasn't completed, status string is empty
+	if job.end.IsZero() {
+		return ""
+	}
+	var okerr string
+	if job.status == StatusOK {
+		okerr = "OK"
+	} else {
+		okerr = "ERR"
+	}
+	return fmt.Sprintf(" = %s (%d)", okerr, job.status)
+}
+
+// String returns a human-readable description of `job`
+func (job *Job) String() string {
+	return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
+}
+
+func (job *Job) Env() *Env {
+	return job.env
+}
+
+func (job *Job) EnvExists(key string) (value bool) {
+	return job.env.Exists(key)
+}
+
+func (job *Job) Getenv(key string) (value string) {
+	return job.env.Get(key)
+}
+
+func (job *Job) GetenvBool(key string) (value bool) {
+	return job.env.GetBool(key)
+}
+
+func (job *Job) SetenvBool(key string, value bool) {
+	job.env.SetBool(key, value)
+}
+
+func (job *Job) GetenvSubEnv(key string) *Env {
+	return job.env.GetSubEnv(key)
+}
+
+func (job *Job) SetenvSubEnv(key string, value *Env) error {
+	return job.env.SetSubEnv(key, value)
+}
+
+func (job *Job) GetenvInt64(key string) int64 {
+	return job.env.GetInt64(key)
+}
+
+func (job *Job) GetenvInt(key string) int {
+	return job.env.GetInt(key)
+}
+
+func (job *Job) SetenvInt64(key string, value int64) {
+	job.env.SetInt64(key, value)
+}
+
+func (job *Job) SetenvInt(key string, value int) {
job.env.SetInt(key, value) +} + +// Returns nil if key not found +func (job *Job) GetenvList(key string) []string { + return job.env.GetList(key) +} + +func (job *Job) GetenvJson(key string, iface interface{}) error { + return job.env.GetJson(key, iface) +} + +func (job *Job) SetenvJson(key string, value interface{}) error { + return job.env.SetJson(key, value) +} + +func (job *Job) SetenvList(key string, value []string) error { + return job.env.SetJson(key, value) +} + +func (job *Job) Setenv(key, value string) { + job.env.Set(key, value) +} + +// DecodeEnv decodes `src` as a json dictionary, and adds +// each decoded key-value pair to the environment. +// +// If `src` cannot be decoded as a json dictionary, an error +// is returned. +func (job *Job) DecodeEnv(src io.Reader) error { + return job.env.Decode(src) +} + +func (job *Job) EncodeEnv(dst io.Writer) error { + return job.env.Encode(dst) +} + +func (job *Job) ImportEnv(src interface{}) (err error) { + return job.env.Import(src) +} + +func (job *Job) Environ() map[string]string { + return job.env.Map() +} + +func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { + prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) + return fmt.Fprintf(job.Stderr, prefixedFormat, args...) +} + +func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { + return fmt.Fprintf(job.Stdout, format, args...) +} + +func (job *Job) Errorf(format string, args ...interface{}) Status { + if format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Fprintf(job.Stderr, format, args...) + return StatusErr +} + +func (job *Job) Error(err error) Status { + fmt.Fprintf(job.Stderr, "%s\n", err) + return StatusErr +} + +func (job *Job) StatusCode() int { + return int(job.status) +} + +func (job *Job) SetCloseIO(val bool) { + job.closeIO = val +} diff --git a/engine/job_test.go b/engine/job_test.go new file mode 100644 index 00000000..67e72398 --- /dev/null +++ b/engine/job_test.go @@ -0,0 +1,75 @@ +package engine + +import ( + "bytes" + "fmt" + "testing" +) + +func TestJobStatusOK(t *testing.T) { + eng := New() + eng.Register("return_ok", func(job *Job) Status { return StatusOK }) + err := eng.Job("return_ok").Run() + if err != nil { + t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) + } +} + +func TestJobStatusErr(t *testing.T) { + eng := New() + eng.Register("return_err", func(job *Job) Status { return StatusErr }) + err := eng.Job("return_err").Run() + if err == nil { + t.Fatalf("When a job returns StatusErr, Run() should return an error") + } +} + +func TestJobStatusNotFound(t *testing.T) { + eng := New() + eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) + err := eng.Job("return_not_found").Run() + if err == nil { + t.Fatalf("When a job returns StatusNotFound, Run() should return an error") + } +} + +func TestJobStdoutString(t *testing.T) { + eng := New() + // FIXME: test multiple combinations of output and status + eng.Register("say_something_in_stdout", func(job *Job) Status { + job.Printf("Hello world\n") + return StatusOK + }) + + job := eng.Job("say_something_in_stdout") + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) + if err := job.Run(); err != nil { + t.Fatal(err) + } + fmt.Println(outputBuffer) + var output = Tail(outputBuffer, 1) + if expectedOutput := "Hello world"; output != expectedOutput { + t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) + } +} + +func 
TestJobStderrString(t *testing.T) {
+	eng := New()
+	// FIXME: test multiple combinations of output and status
+	eng.Register("say_something_in_stderr", func(job *Job) Status {
+		job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n")
+		return StatusOK
+	})
+
+	job := eng.Job("say_something_in_stderr")
+	var outputBuffer = bytes.NewBuffer(nil)
+	job.Stderr.Add(outputBuffer)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	var output = Tail(outputBuffer, 1)
+	if expectedOutput := "Something happened"; output != expectedOutput {
+		t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
+	}
+}
diff --git a/engine/shutdown_test.go b/engine/shutdown_test.go
new file mode 100644
index 00000000..13d80492
--- /dev/null
+++ b/engine/shutdown_test.go
@@ -0,0 +1,80 @@
+package engine
+
+import (
+	"testing"
+	"time"
+)
+
+func TestShutdownEmpty(t *testing.T) {
+	eng := New()
+	if eng.IsShutdown() {
+		t.Fatalf("IsShutdown should be false")
+	}
+	eng.Shutdown()
+	if !eng.IsShutdown() {
+		t.Fatalf("IsShutdown should be true")
+	}
+}
+
+func TestShutdownAfterRun(t *testing.T) {
+	eng := New()
+	var called bool
+	eng.Register("foo", func(job *Job) Status {
+		called = true
+		return StatusOK
+	})
+	if err := eng.Job("foo").Run(); err != nil {
+		t.Fatal(err)
+	}
+	eng.Shutdown()
+	if err := eng.Job("foo").Run(); err == nil {
+		t.Fatalf("%#v", *eng)
+	}
+}
+
+// An approximate and racy, but better-than-nothing test that Shutdown()
+// blocks until running jobs have completed.
+func TestShutdownDuringRun(t *testing.T) {
+	var (
+		jobDelay     time.Duration = 500 * time.Millisecond
+		jobDelayLow  time.Duration = 100 * time.Millisecond
+		jobDelayHigh time.Duration = 700 * time.Millisecond
+	)
+	eng := New()
+	var completed bool
+	eng.Register("foo", func(job *Job) Status {
+		time.Sleep(jobDelay)
+		completed = true
+		return StatusOK
+	})
+	go eng.Job("foo").Run()
+	time.Sleep(50 * time.Millisecond)
+	done := make(chan struct{})
+	var startShutdown time.Time
+	go func() {
+		startShutdown = time.Now()
+		eng.Shutdown()
+		close(done)
+	}()
+	time.Sleep(50 * time.Millisecond)
+	if err := eng.Job("foo").Run(); err == nil {
+		t.Fatalf("run on shutdown should fail: %#v", *eng)
+	}
+	<-done
+	// Verify that Shutdown() blocks for roughly 500ms, instead
+	// of returning almost instantly.
+	//
+	// We use >100ms to leave ample margin for race conditions between
+	// goroutines. It's possible (but unlikely in reasonable testing
+	// conditions), that this test will cause a false positive or false
+	// negative. But it's probably better than not having any test
+	// for the 99.999% of time where testing conditions are reasonable.
+	if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() {
+		t.Fatalf("shutdown did not block long enough: %v", d)
+	} else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() {
+		t.Fatalf("shutdown blocked too long: %v", d)
+	}
+	if !completed {
+		t.Fatalf("job did not complete")
+	}
+}
diff --git a/engine/streams.go b/engine/streams.go
new file mode 100644
index 00000000..99e876e1
--- /dev/null
+++ b/engine/streams.go
@@ -0,0 +1,222 @@
+package engine
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+)
+
+type Output struct {
+	sync.Mutex
+	dests []io.Writer
+	tasks sync.WaitGroup
+	used  bool
+}
+
+// Tail returns the n last lines of a buffer, stripped of the
+// trailing \n, if any.
+// If n <= 0, it returns an empty string.
+func Tail(buffer *bytes.Buffer, n int) string {
+	if n <= 0 {
+		return ""
+	}
+	b := buffer.Bytes()
+	if len(b) > 0 && b[len(b)-1] == '\n' {
+		b = b[:len(b)-1]
+	}
+	for i := buffer.Len() - 2; i >= 0; i-- {
+		if b[i] == '\n' {
+			n--
+			if n == 0 {
+				return string(b[i+1:])
+			}
+		}
+	}
+	return string(b)
+}
+
+// NewOutput returns a new Output object with no destinations attached.
+// Writing to an empty Output will cause the written data to be discarded.
+func NewOutput() *Output {
+	return &Output{}
+}
+
+// Used returns true if something was written on this output.
+func (o *Output) Used() bool {
+	o.Lock()
+	defer o.Unlock()
+	return o.used
+}
+
+// Add attaches a new destination to the Output. Any data subsequently written
+// to the output will be written to the new destination in addition to all the others.
+// This method is thread-safe.
+func (o *Output) Add(dst io.Writer) {
+	o.Lock()
+	defer o.Unlock()
+	o.dests = append(o.dests, dst)
+}
+
+// Set closes and removes all existing destinations, then attaches a single
+// new destination to the Output. Any data subsequently written to the output
+// will be written to the new destination only. This method is thread-safe.
+func (o *Output) Set(dst io.Writer) {
+	o.Close()
+	o.Lock()
+	defer o.Unlock()
+	o.dests = []io.Writer{dst}
+}
+
+// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
+// and returns its reading end for consumption by the caller.
+// This is roughly equivalent to Cmd.StdoutPipe() in the standard os/exec package.
+// This method is thread-safe.
+func (o *Output) AddPipe() (io.Reader, error) {
+	r, w := io.Pipe()
+	o.Add(w)
+	return r, nil
+}
+
+// Write writes the same data to all registered destinations.
+// This method is thread-safe.
+func (o *Output) Write(p []byte) (n int, err error) {
+	o.Lock()
+	defer o.Unlock()
+	o.used = true
+	var firstErr error
+	for _, dst := range o.dests {
+		_, err := dst.Write(p)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return len(p), firstErr
+}
+
+// Close closes each destination that implements io.Closer, and waits for
+// all background tasks (such as those started by AddEnv or AddTable) to
+// complete.
+func (o *Output) Close() error {
+	o.Lock()
+	defer o.Unlock()
+	var firstErr error
+	for _, dst := range o.dests {
+		if closer, ok := dst.(io.Closer); ok {
+			err := closer.Close()
+			if err != nil && firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+	o.tasks.Wait()
+	return firstErr
+}
+
+type Input struct {
+	src io.Reader
+	sync.Mutex
+}
+
+// NewInput returns a new Input object with no source attached.
+// Reading from an empty Input will return io.EOF.
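+//
+// A minimal sketch (hypothetical source):
+//
+//	in := NewInput()
+//	in.Add(strings.NewReader("payload"))
+//	data, _ := ioutil.ReadAll(in) // data == []byte("payload")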
+func NewInput() *Input {
+	return &Input{}
+}
+
+// Read reads from the input in a thread-safe way.
+func (i *Input) Read(p []byte) (n int, err error) {
+	i.Mutex.Lock()
+	defer i.Mutex.Unlock()
+	if i.src == nil {
+		return 0, io.EOF
+	}
+	return i.src.Read(p)
+}
+
+// Close closes the src.
+// Not thread-safe on purpose.
+func (i *Input) Close() error {
+	if i.src != nil {
+		if closer, ok := i.src.(io.Closer); ok {
+			return closer.Close()
+		}
+	}
+	return nil
+}
+
+// Add attaches a new source to the input.
+// Add can only be called once per input. Subsequent calls will
+// return an error.
+func (i *Input) Add(src io.Reader) error {
+	i.Mutex.Lock()
+	defer i.Mutex.Unlock()
+	if i.src != nil {
+		return fmt.Errorf("Maximum number of sources reached: 1")
+	}
+	i.src = src
+	return nil
+}
+
+// AddEnv starts a new goroutine which will decode all subsequent data
+// as a stream of json-encoded objects, and point `dst` to the last
+// decoded object.
+// The result `dst` can be queried using the type-neutral Env interface.
+// It is not safe to query `dst` until the Output is closed.
+func (o *Output) AddEnv() (dst *Env, err error) {
+	src, err := o.AddPipe()
+	if err != nil {
+		return nil, err
+	}
+	dst = &Env{}
+	o.tasks.Add(1)
+	go func() {
+		defer o.tasks.Done()
+		decoder := NewDecoder(src)
+		for {
+			env, err := decoder.Decode()
+			if err != nil {
+				return
+			}
+			*dst = *env
+		}
+	}()
+	return dst, nil
+}
+
+func (o *Output) AddListTable() (dst *Table, err error) {
+	src, err := o.AddPipe()
+	if err != nil {
+		return nil, err
+	}
+	dst = NewTable("", 0)
+	o.tasks.Add(1)
+	go func() {
+		defer o.tasks.Done()
+		content, err := ioutil.ReadAll(src)
+		if err != nil {
+			return
+		}
+		if _, err := dst.ReadListFrom(content); err != nil {
+			return
+		}
+	}()
+	return dst, nil
+}
+
+func (o *Output) AddTable() (dst *Table, err error) {
+	src, err := o.AddPipe()
+	if err != nil {
+		return nil, err
+	}
+	dst = NewTable("", 0)
+	o.tasks.Add(1)
+	go func() {
+		defer o.tasks.Done()
+		if _, err := dst.ReadFrom(src); err != nil {
+			return
+		}
+	}()
+	return dst, nil
+}
diff --git a/engine/streams_test.go b/engine/streams_test.go
new file mode 100644
index 00000000..5cfd5d0e
--- /dev/null
+++ b/engine/streams_test.go
@@ -0,0 +1,210 @@
+package engine
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+type sentinelWriteCloser struct {
+	calledWrite bool
+	calledClose bool
+}
+
+func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
+	w.calledWrite = true
+	return len(p), nil
+}
+
+func (w *sentinelWriteCloser) Close() error {
+	w.calledClose = true
+	return nil
+}
+
+func TestOutputAddEnv(t *testing.T) {
+	input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
+	o := NewOutput()
+	result, err := o.AddEnv()
+	if err != nil {
+		t.Fatal(err)
+	}
+	o.Write([]byte(input))
+	o.Close()
+	if v := result.Get("foo"); v != "bar" {
+		t.Errorf("Expected %v, got %v", "bar", v)
+	}
+	if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
+		t.Errorf("Expected %v, got %v", 42, v)
+	}
+	if v := result.Get("this-value-doesnt-exist"); v != "" {
+		t.Errorf("Expected %v, got %v", "", v)
+	}
+}
+
+func TestOutputAddClose(t *testing.T) {
+	o := NewOutput()
+	var s sentinelWriteCloser
+	o.Add(&s)
+	if err := o.Close(); err != nil {
+		t.Fatal(err)
+	}
+	// Write data after the output is closed.
+	// Write should succeed, but no destination should receive it.
+ if _, err := o.Write([]byte("foo bar")); err != nil { + t.Fatal(err) + } + if !s.calledClose { + t.Fatal("Output.Close() didn't close the destination") + } +} + +func TestOutputAddPipe(t *testing.T) { + var testInputs = []string{ + "hello, world!", + "One\nTwo\nThree", + "", + "A line\nThen another nl-terminated line\n", + "A line followed by an empty line\n\n", + } + for _, input := range testInputs { + expectedOutput := input + o := NewOutput() + r, err := o.AddPipe() + if err != nil { + t.Fatal(err) + } + go func(o *Output) { + if n, err := o.Write([]byte(input)); err != nil { + t.Error(err) + } else if n != len(input) { + t.Errorf("Expected %d, got %d", len(input), n) + } + if err := o.Close(); err != nil { + t.Error(err) + } + }(o) + output, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(output) != expectedOutput { + t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) + } + } +} + +func TestTail(t *testing.T) { + var tests = make(map[string][]string) + tests["hello, world!"] = []string{ + "", + "hello, world!", + "hello, world!", + "hello, world!", + } + tests["One\nTwo\nThree"] = []string{ + "", + "Three", + "Two\nThree", + "One\nTwo\nThree", + } + for input, outputs := range tests { + for n, expectedOutput := range outputs { + output := Tail(bytes.NewBufferString(input), n) + if output != expectedOutput { + t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) + } + } + } +} + +func lastLine(txt string) string { + scanner := bufio.NewScanner(strings.NewReader(txt)) + var lastLine string + for scanner.Scan() { + lastLine = scanner.Text() + } + return lastLine +} + +func TestOutputAdd(t *testing.T) { + o := NewOutput() + b := &bytes.Buffer{} + o.Add(b) + input := "hello, world!" 
+	if n, err := o.Write([]byte(input)); err != nil {
+		t.Fatal(err)
+	} else if n != len(input) {
+		t.Fatalf("Expected %d, got %d", len(input), n)
+	}
+	if output := b.String(); output != input {
+		t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
+	}
+}
+
+func TestOutputWriteError(t *testing.T) {
+	o := NewOutput()
+	buf := &bytes.Buffer{}
+	o.Add(buf)
+	r, w := io.Pipe()
+	input := "Hello there"
+	expectedErr := fmt.Errorf("This is an error")
+	r.CloseWithError(expectedErr)
+	o.Add(w)
+	n, err := o.Write([]byte(input))
+	if err != expectedErr {
+		t.Fatalf("Output.Write() should return the first error encountered, if any")
+	}
+	if buf.String() != input {
+		t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
+	}
+	if n != len(input) {
+		t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
+	}
+}
+
+func TestInputAddEmpty(t *testing.T) {
+	i := NewInput()
+	var b bytes.Buffer
+	if err := i.Add(&b); err != nil {
+		t.Fatal(err)
+	}
+	data, err := ioutil.ReadAll(i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(data) > 0 {
+		t.Fatalf("Read from empty input should yield no data")
+	}
+}
+
+func TestInputAddTwo(t *testing.T) {
+	i := NewInput()
+	var b1 bytes.Buffer
+	// First add should succeed
+	if err := i.Add(&b1); err != nil {
+		t.Fatal(err)
+	}
+	var b2 bytes.Buffer
+	// Second add should fail
+	if err := i.Add(&b2); err == nil {
+		t.Fatalf("Adding a second source should return an error")
+	}
+}
+
+func TestInputAddNotEmpty(t *testing.T) {
+	i := NewInput()
+	b := bytes.NewBufferString("hello world\nabc")
+	expectedResult := b.String()
+	i.Add(b)
+	result, err := ioutil.ReadAll(i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(result) != expectedResult {
+		t.Fatalf("Expected: %v\nReceived: %v", expectedResult, string(result))
+	}
+}
diff --git a/engine/table.go b/engine/table.go
new file mode 100644
index 00000000..4498bdf1
--- /dev/null
+++ b/engine/table.go
@@ -0,0 +1,140 @@
+package engine
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"sort"
+	"strconv"
+)
+
+type Table struct {
+	Data    []*Env
+	sortKey string
+	Chan    chan *Env
+}
+
+func NewTable(sortKey string, sizeHint int) *Table {
+	return &Table{
+		make([]*Env, 0, sizeHint),
+		sortKey,
+		make(chan *Env),
+	}
+}
+
+func (t *Table) SetKey(sortKey string) {
+	t.sortKey = sortKey
+}
+
+func (t *Table) Add(env *Env) {
+	t.Data = append(t.Data, env)
+}
+
+func (t *Table) Len() int {
+	return len(t.Data)
+}
+
+func (t *Table) Less(a, b int) bool {
+	return t.lessBy(a, b, t.sortKey)
+}
+
+func (t *Table) lessBy(a, b int, by string) bool {
+	keyA := t.Data[a].Get(by)
+	keyB := t.Data[b].Get(by)
+	intA, errA := strconv.ParseInt(keyA, 10, 64)
+	intB, errB := strconv.ParseInt(keyB, 10, 64)
+	if errA == nil && errB == nil {
+		return intA < intB
+	}
+	return keyA < keyB
+}
+
+func (t *Table) Swap(a, b int) {
+	tmp := t.Data[a]
+	t.Data[a] = t.Data[b]
+	t.Data[b] = tmp
+}
+
+func (t *Table) Sort() {
+	sort.Sort(t)
+}
+
+func (t *Table) ReverseSort() {
+	sort.Sort(sort.Reverse(t))
+}
+
+func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) {
+	if _, err := dst.Write([]byte{'['}); err != nil {
+		return -1, err
+	}
+	n = 1
+	for i, env := range t.Data {
+		bytes, err := env.WriteTo(dst)
+		if err != nil {
+			return -1, err
+		}
+		n += bytes
+		if i != len(t.Data)-1 {
+			if _, err := dst.Write([]byte{','}); err != nil {
+				return -1, err
+			}
+			n++
+		}
+	}
+	if _, err := dst.Write([]byte{']'}); err != nil {
+ return -1, err
+ }
+ return n + 1, nil
+}
+
+func (t *Table) ToListString() (string, error) {
+ buffer := bytes.NewBuffer(nil)
+ if _, err := t.WriteListTo(buffer); err != nil {
+ return "", err
+ }
+ return buffer.String(), nil
+}
+
+func (t *Table) WriteTo(dst io.Writer) (n int64, err error) {
+ for _, env := range t.Data {
+ bytes, err := env.WriteTo(dst)
+ if err != nil {
+ return -1, err
+ }
+ n += bytes
+ }
+ return n, nil
+}
+
+func (t *Table) ReadListFrom(src []byte) (n int64, err error) {
+ var array []interface{}
+
+ if err := json.Unmarshal(src, &array); err != nil {
+ return -1, err
+ }
+
+ for _, item := range array {
+ if m, ok := item.(map[string]interface{}); ok {
+ env := &Env{}
+ for key, value := range m {
+ env.SetAuto(key, value)
+ }
+ t.Add(env)
+ }
+ }
+
+ return int64(len(src)), nil
+}
+
+func (t *Table) ReadFrom(src io.Reader) (n int64, err error) {
+ decoder := NewDecoder(src)
+ for {
+ env, err := decoder.Decode()
+ if err == io.EOF {
+ return 0, nil
+ } else if err != nil {
+ return -1, err
+ }
+ t.Add(env)
+ }
+}
diff --git a/engine/table_test.go b/engine/table_test.go
new file mode 100644
index 00000000..9a32ac9c
--- /dev/null
+++ b/engine/table_test.go
@@ -0,0 +1,112 @@
+package engine
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+)
+
+func TestTableWriteTo(t *testing.T) {
+ table := NewTable("", 0)
+ e := &Env{}
+ e.Set("foo", "bar")
+ table.Add(e)
+ var buf bytes.Buffer
+ if _, err := table.WriteTo(&buf); err != nil {
+ t.Fatal(err)
+ }
+ output := make(map[string]string)
+ if err := json.Unmarshal(buf.Bytes(), &output); err != nil {
+ t.Fatal(err)
+ }
+ if len(output) != 1 {
+ t.Fatalf("Incorrect output: %v", output)
+ }
+ if val, exists := output["foo"]; !exists || val != "bar" {
+ t.Fatalf("Incorrect output: %v", output)
+ }
+}
+
+func TestTableSortStringValue(t *testing.T) {
+ table := NewTable("Key", 0)
+
+ e := &Env{}
+ e.Set("Key", "A")
+ table.Add(e)
+
+ e = &Env{}
+ e.Set("Key", "D")
+ table.Add(e)
+
+ e = &Env{}
+ e.Set("Key", "B")
+ table.Add(e)
+
+ e = &Env{}
+ e.Set("Key", "C")
+ table.Add(e)
+
+ table.Sort()
+
+ if len := table.Len(); len != 4 {
+ t.Fatalf("Expected 4, got %d", len)
+ }
+
+ if value := table.Data[0].Get("Key"); value != "A" {
+ t.Fatalf("Expected A, got %s", value)
+ }
+
+ if value := table.Data[1].Get("Key"); value != "B" {
+ t.Fatalf("Expected B, got %s", value)
+ }
+
+ if value := table.Data[2].Get("Key"); value != "C" {
+ t.Fatalf("Expected C, got %s", value)
+ }
+
+ if value := table.Data[3].Get("Key"); value != "D" {
+ t.Fatalf("Expected D, got %s", value)
+ }
+}
+
+func TestTableReverseSortStringValue(t *testing.T) {
+ table := NewTable("Key", 0)
+
+ e := &Env{}
+ e.Set("Key", "A")
+ table.Add(e)
+
+ e = &Env{}
+ e.Set("Key", "D")
+ table.Add(e)
+
+ e = &Env{}
+ e.Set("Key", "B")
+ table.Add(e)
+
+ e = &Env{}
+ e.Set("Key", "C")
+ table.Add(e)
+
+ table.ReverseSort()
+
+ if len := table.Len(); len != 4 {
+ t.Fatalf("Expected 4, got %d", len)
+ }
+
+ if value := table.Data[0].Get("Key"); value != "D" {
+ t.Fatalf("Expected D, got %s", value)
+ }
+
+ if value := table.Data[1].Get("Key"); value != "C" {
+ t.Fatalf("Expected C, got %s", value)
+ }
+
+ if value := table.Data[2].Get("Key"); value != "B" {
+ t.Fatalf("Expected B, got %s", value)
+ }
+
+ if value := table.Data[3].Get("Key"); value != "A" {
+ t.Fatalf("Expected A, got %s", value)
+ }
+}
diff --git a/events/events.go b/events/events.go
new file mode 100644
index 00000000..57a82cad
--- /dev/null
+++ b/events/events.go
@@ -0,0 +1,176 @@
+package events
+
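+// The events subsystem keeps a bounded, in-memory log of the most recent
+// daemon events (capped at eventsLimit) and fans each new event out to all
+// subscribed listeners, dropping delivery to slow subscribers rather than
+// blocking the producer.
+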
+import (
+ "encoding/json"
+ "sync"
+ "time"
+
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/utils"
+)
+
+const eventsLimit = 64
+
+type listener chan<- *utils.JSONMessage
+
+type Events struct {
+ mu sync.RWMutex
+ events []*utils.JSONMessage
+ subscribers []listener
+}
+
+func New() *Events {
+ return &Events{
+ events: make([]*utils.JSONMessage, 0, eventsLimit),
+ }
+}
+
+// Install installs the events public API in the docker engine
+func (e *Events) Install(eng *engine.Engine) error {
+ // The jobs registered here form the public interface
+ jobs := map[string]engine.Handler{
+ "events": e.Get,
+ "log": e.Log,
+ "subscribers_count": e.SubscribersCount,
+ }
+ for name, job := range jobs {
+ if err := eng.Register(name, job); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e *Events) Get(job *engine.Job) engine.Status {
+ var (
+ since = job.GetenvInt64("since")
+ until = job.GetenvInt64("until")
+ timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
+ )
+
+ // If no until, disable timeout
+ if until == 0 {
+ timeout.Stop()
+ }
+
+ listener := make(chan *utils.JSONMessage)
+ e.subscribe(listener)
+ defer e.unsubscribe(listener)
+
+ job.Stdout.Write(nil)
+
+ // Resend every event in the [since, until] time interval.
+ if since != 0 {
+ if err := e.writeCurrent(job, since, until); err != nil {
+ return job.Error(err)
+ }
+ }
+
+ for {
+ select {
+ case event, ok := <-listener:
+ if !ok {
+ return engine.StatusOK
+ }
+ if err := writeEvent(job, event); err != nil {
+ return job.Error(err)
+ }
+ case <-timeout.C:
+ return engine.StatusOK
+ }
+ }
+}
+
+func (e *Events) Log(job *engine.Job) engine.Status {
+ if len(job.Args) != 3 {
+ return job.Errorf("usage: %s ACTION ID FROM", job.Name)
+ }
+ // not waiting for receivers
+ go e.log(job.Args[0], job.Args[1], job.Args[2])
+ return engine.StatusOK
+}
+
+func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
+ ret := &engine.Env{}
+ ret.SetInt("count", e.subscribersCount())
+ ret.WriteTo(job.Stdout)
+ return engine.StatusOK
+}
+
+func writeEvent(job *engine.Job, event *utils.JSONMessage) error {
+ // When sending an event JSON serialization errors are ignored, but all
+ // other errors lead to the eviction of the listener.
+ if b, err := json.Marshal(event); err == nil {
+ if _, err = job.Stdout.Write(b); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e *Events) writeCurrent(job *engine.Job, since, until int64) error {
+ e.mu.RLock()
+ for _, event := range e.events {
+ if event.Time >= since && (event.Time <= until || until == 0) {
+ if err := writeEvent(job, event); err != nil {
+ e.mu.RUnlock()
+ return err
+ }
+ }
+ }
+ e.mu.RUnlock()
+ return nil
+}
+
+func (e *Events) subscribersCount() int {
+ e.mu.RLock()
+ c := len(e.subscribers)
+ e.mu.RUnlock()
+ return c
+}
+
+func (e *Events) log(action, id, from string) {
+ e.mu.Lock()
+ now := time.Now().UTC().Unix()
+ jm := &utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
+ if len(e.events) == cap(e.events) {
+ // discard oldest event
+ copy(e.events, e.events[1:])
+ e.events[len(e.events)-1] = jm
+ } else {
+ e.events = append(e.events, jm)
+ }
+ for _, s := range e.subscribers {
+ // We give each subscriber a 100ms time window to receive the event,
+ // after which we move to the next.
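+ // A send that cannot complete within the window is dropped for that
+ // subscriber; the event itself remains in e.events and can still be
+ // replayed to late subscribers via writeCurrent.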
+ select {
+ case s <- jm:
+ case <-time.After(100 * time.Millisecond):
+ }
+ }
+ e.mu.Unlock()
+}
+
+func (e *Events) subscribe(l listener) {
+ e.mu.Lock()
+ e.subscribers = append(e.subscribers, l)
+ e.mu.Unlock()
+}
+
+// unsubscribe closes and removes the specified listener from the list of
+// previously registered ones.
+// It returns a boolean value indicating if the listener was successfully
+// found, closed and unregistered.
+func (e *Events) unsubscribe(l listener) bool {
+ e.mu.Lock()
+ for i, subscriber := range e.subscribers {
+ if subscriber == l {
+ close(l)
+ e.subscribers = append(e.subscribers[:i], e.subscribers[i+1:]...)
+ e.mu.Unlock()
+ return true
+ }
+ }
+ e.mu.Unlock()
+ return false
+}
diff --git a/events/events_test.go b/events/events_test.go
new file mode 100644
index 00000000..d4fc664b
--- /dev/null
+++ b/events/events_test.go
@@ -0,0 +1,154 @@
+package events
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/utils"
+)
+
+func TestEventsPublish(t *testing.T) {
+ e := New()
+ l1 := make(chan *utils.JSONMessage)
+ l2 := make(chan *utils.JSONMessage)
+ e.subscribe(l1)
+ e.subscribe(l2)
+ count := e.subscribersCount()
+ if count != 2 {
+ t.Fatalf("Must be 2 subscribers, got %d", count)
+ }
+ go e.log("test", "cont", "image")
+ select {
+ case msg := <-l1:
+ if len(e.events) != 1 {
+ t.Fatalf("Must be only one event, got %d", len(e.events))
+ }
+ if msg.Status != "test" {
+ t.Fatalf("Status should be test, got %s", msg.Status)
+ }
+ if msg.ID != "cont" {
+ t.Fatalf("ID should be cont, got %s", msg.ID)
+ }
+ if msg.From != "image" {
+ t.Fatalf("From should be image, got %s", msg.From)
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatal("Timeout waiting for broadcasted message")
+ }
+ select {
+ case msg := <-l2:
+ if len(e.events) != 1 {
+ t.Fatalf("Must be only one event, got %d", len(e.events))
+ }
+ if msg.Status != "test" {
+ t.Fatalf("Status should be test, got %s", msg.Status)
+ }
+ if msg.ID != "cont" {
+ t.Fatalf("ID should be cont, got %s", msg.ID)
+ }
+ if msg.From != "image" {
+ t.Fatalf("From should be image, got %s", msg.From)
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatal("Timeout waiting for broadcasted message")
+ }
+}
+
+func TestEventsPublishTimeout(t *testing.T) {
+ e := New()
+ l := make(chan *utils.JSONMessage)
+ e.subscribe(l)
+
+ c := make(chan struct{})
+ go func() {
+ e.log("test", "cont", "image")
+ close(c)
+ }()
+
+ select {
+ case <-c:
+ case <-time.After(time.Second):
+ t.Fatal("Timeout publishing message")
+ }
+}
+
+func TestLogEvents(t *testing.T) {
+ e := New()
+ eng := engine.New()
+ if err := e.Install(eng); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < eventsLimit+16; i++ {
+ action := fmt.Sprintf("action_%d", i)
+ id := fmt.Sprintf("cont_%d", i)
+ from := fmt.Sprintf("image_%d", i)
+ job := eng.Job("log", action, id, from)
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+ }
+ time.Sleep(50 * time.Millisecond)
+ if len(e.events) != eventsLimit {
+ t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
+ }
+
+ job := eng.Job("events")
+ job.SetenvInt64("since", 1)
+ job.SetenvInt64("until", time.Now().Unix())
+ buf := bytes.NewBuffer(nil)
+ job.Stdout.Add(buf)
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+ buf = bytes.NewBuffer(buf.Bytes())
+ dec := json.NewDecoder(buf)
+ var msgs []utils.JSONMessage
+ for {
+ var jm utils.JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+
}
+ t.Fatal(err)
+ }
+ msgs = append(msgs, jm)
+ }
+ if len(msgs) != eventsLimit {
+ t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
+ }
+ first := msgs[0]
+ if first.Status != "action_16" {
+ t.Fatalf("First action is %s, must be action_16", first.Status)
+ }
+ last := msgs[len(msgs)-1]
+ if last.Status != "action_79" {
+ t.Fatalf("Last action is %s, must be action_79", last.Status)
+ }
+}
+
+func TestEventsCountJob(t *testing.T) {
+ e := New()
+ eng := engine.New()
+ if err := e.Install(eng); err != nil {
+ t.Fatal(err)
+ }
+ l1 := make(chan *utils.JSONMessage)
+ l2 := make(chan *utils.JSONMessage)
+ e.subscribe(l1)
+ e.subscribe(l2)
+ job := eng.Job("subscribers_count")
+ env, _ := job.Stdout.AddEnv()
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+ count := env.GetInt("count")
+ if count != 2 {
+ t.Fatalf("There must be 2 subscribers, got %d", count)
+ }
+}
diff --git a/graph/MAINTAINERS b/graph/MAINTAINERS
new file mode 100644
index 00000000..e409454b
--- /dev/null
+++ b/graph/MAINTAINERS
@@ -0,0 +1,5 @@
+Solomon Hykes (@shykes)
+Victor Vieux (@vieux)
+Michael Crosby (@crosbymichael)
+Cristian Staretu (@unclejack)
+Tibor Vass (@tiborvass)
diff --git a/graph/export.go b/graph/export.go
new file mode 100644
index 00000000..86dc5a34
--- /dev/null
+++ b/graph/export.go
@@ -0,0 +1,168 @@
+package graph
+
+import (
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/pkg/parsers"
+)
+
+// CmdImageExport exports all images with the given tag. All versions
+// containing the same tag are exported. The resulting output is an
+// uncompressed tarball.
+// name is the set of tags to export.
+// out is the writer where the images are written to.
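+// The tarball layout is the same one CmdLoad consumes on import: a
+// top-level 'repositories' file holding the repository-to-tags JSON map,
+// plus one directory per exported image ID containing that image's
+// VERSION, json and layer.tar files.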
+func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
+ if len(job.Args) < 1 {
+ return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
+ }
+ // get image json
+ tempdir, err := ioutil.TempDir("", "docker-export-")
+ if err != nil {
+ return job.Error(err)
+ }
+ defer os.RemoveAll(tempdir)
+
+ rootRepoMap := map[string]Repository{}
+ for _, name := range job.Args {
+ log.Debugf("Serializing %s", name)
+ rootRepo := s.Repositories[name]
+ if rootRepo != nil {
+ // this is a base repo name, like 'busybox'
+ for _, id := range rootRepo {
+ if _, ok := rootRepoMap[name]; !ok {
+ rootRepoMap[name] = rootRepo
+ } else {
+ log.Debugf("Duplicate key [%s]", name)
+ if rootRepoMap[name].Contains(rootRepo) {
+ log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo)
+ continue
+ }
+ log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo)
+ rootRepoMap[name].Update(rootRepo)
+ }
+
+ if err := s.exportImage(job.Eng, id, tempdir); err != nil {
+ return job.Error(err)
+ }
+ }
+ } else {
+ img, err := s.LookupImage(name)
+ if err != nil {
+ return job.Error(err)
+ }
+
+ if img != nil {
+ // This is a named image like 'busybox:latest'
+ repoName, repoTag := parsers.ParseRepositoryTag(name)
+
+ // check this length, because a lookup of a truncated hash will not have a tag
+ // and will not need to be added to this map
+ if len(repoTag) > 0 {
+ if _, ok := rootRepoMap[repoName]; !ok {
+ rootRepoMap[repoName] = Repository{repoTag: img.ID}
+ } else {
+ log.Debugf("Duplicate key [%s]", repoName)
+ newRepo := Repository{repoTag: img.ID}
+ if rootRepoMap[repoName].Contains(newRepo) {
+ log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo)
+ continue
+ }
+ log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo)
+ rootRepoMap[repoName].Update(newRepo)
+ }
+ }
+ if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
+ return job.Error(err)
+ }
+
+ } else {
+ // name is neither a repository nor a resolvable image name;
+ // treat it as a raw image ID and export it directly
+ if err := s.exportImage(job.Eng, name, tempdir); err != nil { + return job.Error(err) + } + } + } + log.Debugf("End Serializing %s", name) + } + // write repositories, if there is something to write + if len(rootRepoMap) > 0 { + rootRepoJson, _ := json.Marshal(rootRepoMap) + if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil { + return job.Error(err) + } + } else { + log.Debugf("There were no repositories to write") + } + + fs, err := archive.Tar(tempdir, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + defer fs.Close() + + if _, err := io.Copy(job.Stdout, fs); err != nil { + return job.Error(err) + } + log.Debugf("End export job: %s", job.Name) + return engine.StatusOK +} + +// FIXME: this should be a top-level function, not a class method +func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error { + for n := name; n != ""; { + // temporary directory + tmpImageDir := path.Join(tempdir, n) + if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { + if os.IsExist(err) { + return nil + } + return err + } + + var version = "1.0" + var versionBuf = []byte(version) + + if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil { + return err + } + + // serialize json + json, err := os.Create(path.Join(tmpImageDir, "json")) + if err != nil { + return err + } + job := eng.Job("image_inspect", n) + job.SetenvBool("raw", true) + job.Stdout.Add(json) + if err := job.Run(); err != nil { + return err + } + + // serialize filesystem + fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) + if err != nil { + return err + } + job = eng.Job("image_tarlayer", n) + job.Stdout.Add(fsTar) + if err := job.Run(); err != nil { + return err + } + + // find parent + job = eng.Job("image_get", n) + info, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + return err + } + n = info.Get("Parent") + } + return nil +} diff --git a/graph/graph.go b/graph/graph.go new file mode 100644 index 00000000..00c0324e --- /dev/null +++ b/graph/graph.go @@ -0,0 +1,397 @@ +package graph + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +// A Graph is a store for versioned filesystem images and the relationship between them. +type Graph struct { + Root string + idIndex *truncindex.TruncIndex + driver graphdriver.Driver +} + +// NewGraph instantiates a new graph at the given root path in the filesystem. +// `root` will be created if it doesn't exist. 
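+// A typical caller constructs the graph once at daemon startup, e.g.
+// (sketch; `driver` is whichever graphdriver.Driver the daemon selected):
+//
+//	g, err := NewGraph("/var/lib/docker/graph", driver)
+//	if err != nil {
+//		return err
+//	}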
+func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) {
+ abspath, err := filepath.Abs(root)
+ if err != nil {
+ return nil, err
+ }
+ // Create the root directory if it doesn't exist
+ if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ graph := &Graph{
+ Root: abspath,
+ idIndex: truncindex.NewTruncIndex([]string{}),
+ driver: driver,
+ }
+ if err := graph.restore(); err != nil {
+ return nil, err
+ }
+ return graph, nil
+}
+
+func (graph *Graph) restore() error {
+ dir, err := ioutil.ReadDir(graph.Root)
+ if err != nil {
+ return err
+ }
+ var ids = []string{}
+ for _, v := range dir {
+ id := v.Name()
+ if graph.driver.Exists(id) {
+ ids = append(ids, id)
+ }
+ }
+ graph.idIndex = truncindex.NewTruncIndex(ids)
+ log.Debugf("Restored %d elements", len(ids))
+ return nil
+}
+
+// FIXME: Implement error subclass instead of looking at the error text
+// Note: This is the way golang implements os.IsNotExists on Plan9
+func (graph *Graph) IsNotExist(err error) bool {
+ return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such"))
+}
+
+// Exists returns true if an image is registered at the given id.
+// If the image doesn't exist or if an error is encountered, false is returned.
+func (graph *Graph) Exists(id string) bool {
+ if _, err := graph.Get(id); err != nil {
+ return false
+ }
+ return true
+}
+
+// Get returns the image with the given id, or an error if the image doesn't exist.
+func (graph *Graph) Get(name string) (*image.Image, error) {
+ id, err := graph.idIndex.Get(name)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.LoadImage(graph.ImageRoot(id))
+ if err != nil {
+ return nil, err
+ }
+ if img.ID != id {
+ return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
+ }
+ img.SetGraph(graph)
+
+ if img.Size < 0 {
+ size, err := graph.driver.DiffSize(img.ID, img.Parent)
+ if err != nil {
+ return nil, fmt.Errorf("unable to calculate size of image id %q: %s", img.ID, err)
+ }
+
+ img.Size = size
+ if err := img.SaveSize(graph.ImageRoot(id)); err != nil {
+ return nil, err
+ }
+ }
+ return img, nil
+}
+
+// Create creates a new image and registers it in the graph.
+func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
+ img := &image.Image{
+ ID: utils.GenerateRandomID(),
+ Comment: comment,
+ Created: time.Now().UTC(),
+ DockerVersion: dockerversion.VERSION,
+ Author: author,
+ Config: config,
+ Architecture: runtime.GOARCH,
+ OS: runtime.GOOS,
+ }
+
+ if containerID != "" {
+ img.Parent = containerImage
+ img.Container = containerID
+ img.ContainerConfig = *containerConfig
+ }
+
+ if err := graph.Register(img, nil, layerData); err != nil {
+ return nil, err
+ }
+ return img, nil
+}
+
+// Register imports a pre-existing image into the graph.
+func (graph *Graph) Register(img *image.Image, jsonData []byte, layerData archive.ArchiveReader) (err error) {
+ defer func() {
+ // If any error occurs, remove the new dir from the driver.
+ // Don't check for errors since the dir might not have been created.
+ // FIXME: this leaves a possible race condition.
+ if err != nil {
+ graph.driver.Remove(img.ID)
+ }
+ }()
+ if err := utils.ValidateID(img.ID); err != nil {
+ return err
+ }
+ // (This is a convenience to save time.
Race conditions are taken care of by os.Rename)
+ if graph.Exists(img.ID) {
+ return fmt.Errorf("Image %s already exists", img.ID)
+ }
+
+ // Ensure that the image root does not exist on the filesystem
+ // when it is not registered in the graph.
+ // This is common when you switch from one graph driver to another
+ if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh.
+ // (the graph is the source of truth).
+ // Ignore errors, since we don't know if the driver correctly returns ErrNotExist.
+ // (FIXME: make that mandatory for drivers).
+ graph.driver.Remove(img.ID)
+
+ tmp, err := graph.Mktemp("")
+ if err != nil {
+ return fmt.Errorf("Mktemp failed: %s", err)
+ }
+ defer os.RemoveAll(tmp)
+
+ // Create root filesystem in the driver
+ if err := graph.driver.Create(img.ID, img.Parent); err != nil {
+ return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err)
+ }
+ // Apply the diff/layer
+ img.SetGraph(graph)
+ if err := image.StoreImage(img, jsonData, layerData, tmp); err != nil {
+ return err
+ }
+ // Commit
+ if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil {
+ return err
+ }
+ graph.idIndex.Add(img.ID)
+ return nil
+}
+
+// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
+// The archive is stored on disk and will be automatically deleted as soon as it has been read.
+// If output is not nil, a human-readable progress bar will be written to it.
+// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
+func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) {
+ image, err := graph.Get(id)
+ if err != nil {
+ return nil, err
+ }
+ tmp, err := graph.Mktemp("")
+ if err != nil {
+ return nil, err
+ }
+ a, err := image.TarLayer()
+ if err != nil {
+ return nil, err
+ }
+ progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk")
+ defer progress.Close()
+ return archive.NewTempArchive(progress, tmp)
+}
+
+// Mktemp creates a temporary sub-directory inside the graph's filesystem.
+func (graph *Graph) Mktemp(id string) (string, error) {
+ dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID())
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return "", err
+ }
+ return dir, nil
+}
+
+// SetupInitLayer populates a directory with mountpoints suitable
+// for bind-mounting dockerinit into the container. The mountpoint is simply an
+// empty file at /.dockerinit
+//
+// This extra layer is used by all containers as the top-most ro layer. It protects
+// the container from unwanted side-effects on the rw layer.
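+//
+// Each key in the table below is created relative to initLayer: "dir"
+// entries become directories, "file" entries become empty files, and any
+// other value is used as a symlink target (e.g. /etc/mtab -> /proc/mounts).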
+func SetupInitLayer(initLayer string) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerinit": "file", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = path.Join(prev, p) + syscall.Unlink(path.Join(initLayer, prev)) + } + + if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { + return err + } + switch typ { + case "dir": + if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { + return err + } + case "file": + f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + default: + if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} + +// Check if given error is "not empty". +// Note: this is the way golang does it internally with os.IsNotExists. +func isNotEmpty(err error) bool { + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + err = pe.Err + case *os.LinkError: + err = pe.Err + } + return strings.Contains(err.Error(), " not empty") +} + +// Delete atomically removes an image from the graph. +func (graph *Graph) Delete(name string) error { + id, err := graph.idIndex.Get(name) + if err != nil { + return err + } + tmp, err := graph.Mktemp("") + graph.idIndex.Delete(id) + if err == nil { + err = os.Rename(graph.ImageRoot(id), tmp) + // On err make tmp point to old dir and cleanup unused tmp dir + if err != nil { + os.RemoveAll(tmp) + tmp = graph.ImageRoot(id) + } + } else { + // On err make tmp point to old dir for cleanup + tmp = graph.ImageRoot(id) + } + // Remove rootfs data from the driver + graph.driver.Remove(id) + // Remove the trashed image directory + return os.RemoveAll(tmp) +} + +// Map returns a list of all images in the graph, addressable by ID. +func (graph *Graph) Map() (map[string]*image.Image, error) { + images := make(map[string]*image.Image) + err := graph.walkAll(func(image *image.Image) { + images[image.ID] = image + }) + if err != nil { + return nil, err + } + return images, nil +} + +// walkAll iterates over each image in the graph, and passes it to a handler. +// The walking order is undetermined. +func (graph *Graph) walkAll(handler func(*image.Image)) error { + files, err := ioutil.ReadDir(graph.Root) + if err != nil { + return err + } + for _, st := range files { + if img, err := graph.Get(st.Name()); err != nil { + // Skip image + continue + } else if handler != nil { + handler(img) + } + } + return nil +} + +// ByParent returns a lookup table of images by their parent. +// If an image of id ID has 3 children images, then the value for key ID +// will be a list of 3 images. +// If an image has no children, it will not have an entry in the table. 
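+// For example, a linear chain base <- mid <- head yields entries only for
+// base.ID and mid.ID; head has no children and therefore no entry.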
+func (graph *Graph) ByParent() (map[string][]*image.Image, error) { + byParent := make(map[string][]*image.Image) + err := graph.walkAll(func(img *image.Image) { + parent, err := graph.Get(img.Parent) + if err != nil { + return + } + if children, exists := byParent[parent.ID]; exists { + byParent[parent.ID] = append(children, img) + } else { + byParent[parent.ID] = []*image.Image{img} + } + }) + return byParent, err +} + +// Heads returns all heads in the graph, keyed by id. +// A head is an image which is not the parent of another image in the graph. +func (graph *Graph) Heads() (map[string]*image.Image, error) { + heads := make(map[string]*image.Image) + byParent, err := graph.ByParent() + if err != nil { + return nil, err + } + err = graph.walkAll(func(image *image.Image) { + // If it's not in the byParent lookup table, then + // it's not a parent -> so it's a head! + if _, exists := byParent[image.ID]; !exists { + heads[image.ID] = image + } + }) + return heads, err +} + +func (graph *Graph) ImageRoot(id string) string { + return path.Join(graph.Root, id) +} + +func (graph *Graph) Driver() graphdriver.Driver { + return graph.driver +} diff --git a/graph/history.go b/graph/history.go new file mode 100644 index 00000000..2030c4c7 --- /dev/null +++ b/graph/history.go @@ -0,0 +1,46 @@ +package graph + +import ( + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" +) + +func (s *TagStore) CmdHistory(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + name := job.Args[0] + foundImage, err := s.LookupImage(name) + if err != nil { + return job.Error(err) + } + + lookupMap := make(map[string][]string) + for name, repository := range s.Repositories { + for tag, id := range repository { + // If the ID already has a reverse lookup, do not update it unless for "latest" + if _, exists := lookupMap[id]; !exists { + lookupMap[id] = []string{} + } + lookupMap[id] = append(lookupMap[id], name+":"+tag) + } + } + + outs := engine.NewTable("Created", 0) + err = foundImage.WalkHistory(func(img *image.Image) error { + out := &engine.Env{} + out.Set("Id", img.ID) + out.SetInt64("Created", img.Created.Unix()) + out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) + out.SetList("Tags", lookupMap[img.ID]) + out.SetInt64("Size", img.Size) + outs.Add(out) + return nil + }) + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/graph/import.go b/graph/import.go new file mode 100644 index 00000000..36d0d3fe --- /dev/null +++ b/graph/import.go @@ -0,0 +1,61 @@ +package graph + +import ( + "net/http" + "net/url" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/utils" +) + +func (s *TagStore) CmdImport(job *engine.Job) engine.Status { + if n := len(job.Args); n != 2 && n != 3 { + return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) + } + var ( + src = job.Args[0] + repo = job.Args[1] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + archive archive.ArchiveReader + resp *http.Response + ) + if len(job.Args) > 2 { + tag = job.Args[2] + } + + if src == "-" { + archive = job.Stdin + } else { + u, err := url.Parse(src) + if err != nil { + return job.Error(err) + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = utils.Download(u.String()) + if err != nil { 
+ return job.Error(err) + } + progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + defer progressReader.Close() + archive = progressReader + } + img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil) + if err != nil { + return job.Error(err) + } + // Optionally register the image at REPO/TAG + if repo != "" { + if err := s.Set(repo, tag, img.ID, true); err != nil { + return job.Error(err) + } + } + job.Stdout.Write(sf.FormatStatus("", img.ID)) + return engine.StatusOK +} diff --git a/graph/list.go b/graph/list.go new file mode 100644 index 00000000..0e0e97e4 --- /dev/null +++ b/graph/list.go @@ -0,0 +1,103 @@ +package graph + +import ( + "fmt" + "log" + "path" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers/filters" +) + +func (s *TagStore) CmdImages(job *engine.Job) engine.Status { + var ( + allImages map[string]*image.Image + err error + filt_tagged = true + ) + + imageFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := imageFilters["dangling"]; ok { + for _, value := range i { + if strings.ToLower(value) == "true" { + filt_tagged = false + } + } + } + + if job.GetenvBool("all") && filt_tagged { + allImages, err = s.graph.Map() + } else { + allImages, err = s.graph.Heads() + } + if err != nil { + return job.Error(err) + } + lookup := make(map[string]*engine.Env) + s.Lock() + for name, repository := range s.Repositories { + if job.Getenv("filter") != "" { + if match, _ := path.Match(job.Getenv("filter"), name); !match { + continue + } + } + for tag, id := range repository { + image, err := s.graph.Get(id) + if err != nil { + log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) + continue + } + + if out, exists := lookup[id]; exists { + if filt_tagged { + out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) + } + } else { + // get the boolean list for if only the untagged images are requested + delete(allImages, id) + if filt_tagged { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + lookup[id] = out + } + } + + } + } + s.Unlock() + + outs := engine.NewTable("Created", len(lookup)) + for _, value := range lookup { + outs.Add(value) + } + + // Display images which aren't part of a repository/tag + if job.Getenv("filter") == "" { + for _, image := range allImages { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{":"}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + outs.Add(out) + } + } + + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/graph/load.go b/graph/load.go new file mode 100644 index 00000000..f27aca4a --- /dev/null +++ b/graph/load.go @@ -0,0 +1,134 @@ +package graph + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + 
"github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" +) + +// Loads a set of images into the repository. This is the complementary of ImageExport. +// The input stream is an uncompressed tar ball containing images and metadata. +func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { + tmpImageDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(tmpImageDir) + + var ( + repoTarFile = path.Join(tmpImageDir, "repo.tar") + repoDir = path.Join(tmpImageDir, "repo") + ) + + tarFile, err := os.Create(repoTarFile) + if err != nil { + return job.Error(err) + } + if _, err := io.Copy(tarFile, job.Stdin); err != nil { + return job.Error(err) + } + tarFile.Close() + + repoFile, err := os.Open(repoTarFile) + if err != nil { + return job.Error(err) + } + if err := os.Mkdir(repoDir, os.ModeDir); err != nil { + return job.Error(err) + } + images, err := s.graph.Map() + if err != nil { + return job.Error(err) + } + excludes := make([]string, len(images)) + i := 0 + for k := range images { + excludes[i] = k + i++ + } + if err := chrootarchive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil { + return job.Error(err) + } + + dirs, err := ioutil.ReadDir(repoDir) + if err != nil { + return job.Error(err) + } + + for _, d := range dirs { + if d.IsDir() { + if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { + return job.Error(err) + } + } + } + + repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) + if err == nil { + repositories := map[string]Repository{} + if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { + return job.Error(err) + } + + for imageName, tagMap := range repositories { + for tag, address := range tagMap { + if err := s.Set(imageName, tag, address, true); err != nil { + return job.Error(err) + } + } + } + } else if !os.IsNotExist(err) { + return job.Error(err) + } + + return engine.StatusOK +} + +func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { + if err := eng.Job("image_get", address).Run(); err != nil { + log.Debugf("Loading %s", address) + + imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) + if err != nil { + log.Debugf("Error reading json", err) + return err + } + + layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) + if err != nil { + log.Debugf("Error reading embedded tar", err) + return err + } + img, err := image.NewImgJSON(imageJson) + if err != nil { + log.Debugf("Error unmarshalling json", err) + return err + } + if err := utils.ValidateID(img.ID); err != nil { + log.Debugf("Error validating ID: %s", err) + return err + } + if img.Parent != "" { + if !s.graph.Exists(img.Parent) { + if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { + return err + } + } + } + if err := s.graph.Register(img, imageJson, layer); err != nil { + return err + } + } + log.Debugf("Completed processing %s", address) + + return nil +} diff --git a/graph/pools_test.go b/graph/pools_test.go new file mode 100644 index 00000000..129a5e1f --- /dev/null +++ b/graph/pools_test.go @@ -0,0 +1,49 @@ +package graph + +import ( + "testing" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +func TestPools(t *testing.T) { + s := &TagStore{ + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + + if 
_, err := s.poolAdd("pull", "test1"); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := s.poolAdd("pull", "test2"); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
+ t.Fatalf("Expected `pull test1 is already in progress`")
+ }
+ if _, err := s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
+ t.Fatalf("Expected `pull test1 is already in progress`")
+ }
+ if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
+ t.Fatalf("Expected `Unknown pool type`")
+ }
+ if err := s.poolRemove("pull", "test2"); err != nil {
+ t.Fatal(err)
+ }
+ if err := s.poolRemove("pull", "test2"); err != nil {
+ t.Fatal(err)
+ }
+ if err := s.poolRemove("pull", "test1"); err != nil {
+ t.Fatal(err)
+ }
+ if err := s.poolRemove("push", "test1"); err != nil {
+ t.Fatal(err)
+ }
+ if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
+ t.Fatalf("Expected `Unknown pool type`")
+ }
+}
diff --git a/graph/pull.go b/graph/pull.go
new file mode 100644
index 00000000..05d5ec76
--- /dev/null
+++ b/graph/pull.go
@@ -0,0 +1,601 @@
+package graph
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/registry"
+ "github.com/docker/docker/utils"
+ "github.com/docker/libtrust"
+)
+
+func (s *TagStore) verifyManifest(eng *engine.Engine, manifestBytes []byte) (*registry.ManifestData, bool, error) {
+ sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
+ if err != nil {
+ return nil, false, fmt.Errorf("error parsing payload: %s", err)
+ }
+ keys, err := sig.Verify()
+ if err != nil {
+ return nil, false, fmt.Errorf("error verifying payload: %s", err)
+ }
+
+ payload, err := sig.Payload()
+ if err != nil {
+ return nil, false, fmt.Errorf("error retrieving payload: %s", err)
+ }
+
+ var manifest registry.ManifestData
+ if err := json.Unmarshal(payload, &manifest); err != nil {
+ return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
+ }
+ if manifest.SchemaVersion != 1 {
+ return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
+ }
+
+ var verified bool
+ for _, key := range keys {
+ job := eng.Job("trust_key_check")
+ b, err := key.MarshalJSON()
+ if err != nil {
+ return nil, false, fmt.Errorf("error marshalling public key: %s", err)
+ }
+ namespace := manifest.Name
+ if namespace[0] != '/' {
+ namespace = "/" + namespace
+ }
+ stdoutBuffer := bytes.NewBuffer(nil)
+
+ job.Args = append(job.Args, namespace)
+ job.Setenv("PublicKey", string(b))
+ // Check key has read/write permission (0x03)
+ job.SetenvInt("Permission", 0x03)
+ job.Stdout.Add(stdoutBuffer)
+ if err = job.Run(); err != nil {
+ return nil, false, fmt.Errorf("error running key check: %s", err)
+ }
+ result := engine.Tail(stdoutBuffer, 1)
+ log.Debugf("Key check result: %q", result)
+ if result == "verified" {
+ verified = true
+ }
+ }
+
+ return &manifest, verified, nil
+}
+
+func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
+ if n := len(job.Args); n != 1 && n != 2 {
+ return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
+ }
+
+ var (
+ localName = job.Args[0]
+ tag string
+ sf = utils.NewStreamFormatter(job.GetenvBool("json"))
+ authConfig = &registry.AuthConfig{}
+ metaHeaders
map[string][]string
+ mirrors []string
+ )
+
+ if len(job.Args) > 1 {
+ tag = job.Args[1]
+ }
+
+ job.GetenvJson("authConfig", authConfig)
+ job.GetenvJson("metaHeaders", &metaHeaders)
+
+ c, err := s.poolAdd("pull", localName+":"+tag)
+ if err != nil {
+ if c != nil {
+ // Another pull of the same repository is already taking place; just wait for it to finish
+ job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
+ <-c
+ return engine.StatusOK
+ }
+ return job.Error(err)
+ }
+ defer s.poolRemove("pull", localName+":"+tag)
+
+ // Resolve the Repository name from fqn to endpoint + name
+ hostname, remoteName, err := registry.ResolveRepositoryName(localName)
+ if err != nil {
+ return job.Error(err)
+ }
+
+ endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries)
+ if err != nil {
+ return job.Error(err)
+ }
+
+ r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
+ if err != nil {
+ return job.Error(err)
+ }
+
+ var isOfficial bool
+ if endpoint.VersionString(1) == registry.IndexServerAddress() {
+ // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
+ localName = remoteName
+
+ isOfficial = isOfficialName(remoteName)
+ if isOfficial && strings.IndexRune(remoteName, '/') == -1 {
+ remoteName = "library/" + remoteName
+ }
+
+ // Use provided mirrors, if any
+ mirrors = s.mirrors
+ }
+
+ if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) {
+ j := job.Eng.Job("trust_update_base")
+ if err = j.Run(); err != nil {
+ return job.Errorf("error updating trust base graph: %s", err)
+ }
+
+ if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil {
+ return engine.StatusOK
+ } else if err != registry.ErrDoesNotExist {
+ log.Errorf("Error from V2 registry: %s", err)
+ }
+ }
+
+ if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors); err != nil {
+ return job.Error(err)
+ }
+
+ return engine.StatusOK
+}
+
+func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool, mirrors []string) error {
+ out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
+
+ repoData, err := r.GetRepositoryData(remoteName)
+ if err != nil {
+ if strings.Contains(err.Error(), "HTTP code: 404") {
+ return fmt.Errorf("Error: image %s not found", remoteName)
+ }
+ // Unexpected HTTP error
+ return err
+ }
+
+ log.Debugf("Retrieving the tag list")
+ tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
+ if err != nil {
+ log.Errorf("%v", err)
+ return err
+ }
+
+ for tag, id := range tagsList {
+ repoData.ImgList[id] = &registry.ImgData{
+ ID: id,
+ Tag: tag,
+ Checksum: "",
+ }
+ }
+
+ log.Debugf("Registering tags")
+ // If no tag has been specified, pull them all
+ var imageId string
+ if askedTag == "" {
+ for tag, id := range tagsList {
+ repoData.ImgList[id].Tag = tag
+ }
+ } else {
+ // Otherwise, check that the tag exists and use only that one
+ id, exists := tagsList[askedTag]
+ if !exists {
+ return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
+ }
+ imageId = id
+ repoData.ImgList[id].Tag = askedTag
+ }
+
+ errors := make(chan error)
+
+ layers_downloaded := false
+ for _, image := range repoData.ImgList {
+ downloadImage := func(img *registry.ImgData) {
+ if askedTag != "" &&
img.Tag != askedTag { + log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) + if parallel { + errors <- nil + } + return + } + + if img.Tag == "" { + log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + if parallel { + errors <- nil + } + return + } + + // ensure no two downloads of the same image happen at the same time + if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { + if c != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) + <-c + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + } else { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + if parallel { + errors <- nil + } + return + } + defer s.poolRemove("pull", "img:"+img.ID) + + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) + success := false + var lastErr, err error + var is_downloaded bool + if mirrors != nil { + for _, ep := range mirrors { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, localName, ep), nil)) + if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // Don't report errors when pulling from mirrors. + log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, localName, ep, err) + continue + } + layers_downloaded = layers_downloaded || is_downloaded + success = true + break + } + } + if !success { + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) + if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
+ lastErr = err + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) + continue + } + layers_downloaded = layers_downloaded || is_downloaded + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr) + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil)) + if parallel { + errors <- err + return + } + } + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + + if parallel { + errors <- nil + } + } + + if parallel { + go downloadImage(image) + } else { + downloadImage(image) + } + } + if parallel { + var lastError error + for i := 0; i < len(repoData.ImgList); i++ { + if err := <-errors; err != nil { + lastError = err + } + } + if lastError != nil { + return lastError + } + + } + for tag, id := range tagsList { + if askedTag != "" && id != imageId { + continue + } + if err := s.Set(localName, tag, id, true); err != nil { + return err + } + } + + requestedTag := localName + if len(askedTag) > 0 { + requestedTag = localName + ":" + askedTag + } + WriteStatus(requestedTag, out, sf, layers_downloaded) + return nil +} + +func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) { + history, err := r.GetRemoteHistory(imgID, endpoint, token) + if err != nil { + return false, err + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) + // FIXME: Try to stream the images? + // FIXME: Launch the getRemoteImage() in goroutines + + layers_downloaded := false + for i := len(history) - 1; i >= 0; i-- { + id := history[i] + + // ensure no two downloads of the same layer happen at the same time + if c, err := s.poolAdd("pull", "layer:"+id); err != nil { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) + <-c + } + defer s.poolRemove("pull", "layer:"+id) + + if !s.graph.Exists(id) { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) + var ( + imgJSON []byte + imgSize int + err error + img *image.Image + ) + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + img, err = image.NewImgJSON(imgJSON) + layers_downloaded = true + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err) + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else { + break + } + } + + for j := 1; j <= retries; j++ { + // Get the layer + status := "Pulling fs layer" + if j > 1 { + status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) + } + out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil)) + layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + 
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, err + } + layers_downloaded = true + defer layer.Close() + + err = s.graph.Register(img, imgJSON, + utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading")) + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) + return layers_downloaded, err + } else { + break + } + } + } + out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) + } + return layers_downloaded, nil +} + +func WriteStatus(requestedTag string, out io.Writer, sf *utils.StreamFormatter, layers_downloaded bool) { + if layers_downloaded { + out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) + } else { + out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) + } +} + +// downloadInfo is used to pass information from download to extractor +type downloadInfo struct { + imgJSON []byte + img *image.Image + tmpFile *os.File + length int64 + downloaded bool + err chan error +} + +func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) error { + var layersDownloaded bool + if tag == "" { + log.Debugf("Pulling tag list from V2 registry for %s", remoteName) + tags, err := r.GetV2RemoteTags(remoteName, nil) + if err != nil { + return err + } + for _, t := range tags { + if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, t, sf, parallel); err != nil { + return err + } else if downloaded { + layersDownloaded = true + } + } + } else { + if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, tag, sf, parallel); err != nil { + return err + } else if downloaded { + layersDownloaded = true + } + } + + requestedTag := localName + if len(tag) > 0 { + requestedTag = localName + ":" + tag + } + WriteStatus(requestedTag, out, sf, layersDownloaded) + return nil +} + +func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) (bool, error) { + log.Debugf("Pulling tag from V2 registry: %q", tag) + manifestBytes, err := r.GetV2ImageManifest(remoteName, tag, nil) + if err != nil { + return false, err + } + + manifest, verified, err := s.verifyManifest(eng, manifestBytes) + if err != nil { + return false, fmt.Errorf("error verifying manifest: %s", err) + } + + if len(manifest.FSLayers) != len(manifest.History) { + return false, fmt.Errorf("length of history not equal to number of layers") + } + + if verified { + out.Write(sf.FormatStatus(localName+":"+tag, "The image you are pulling has been verified")) + } else { + out.Write(sf.FormatStatus(tag, "Pulling from %s", localName)) + } + + if len(manifest.FSLayers) == 0 { + return false, fmt.Errorf("no blobSums in manifest") + } + + downloads := make([]downloadInfo, len(manifest.FSLayers)) + + for i := len(manifest.FSLayers) - 1; i >= 0; i-- { + var ( + sumStr = manifest.FSLayers[i].BlobSum + imgJSON = []byte(manifest.History[i].V1Compatibility) + ) + + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return false, fmt.Errorf("failed to parse json: %s", err) + } + downloads[i].img = img + + // Check if exists + if 
s.graph.Exists(img.ID) {
+ log.Debugf("Image already exists: %s", img.ID)
+ continue
+ }
+
+ chunks := strings.SplitN(sumStr, ":", 2)
+ if len(chunks) < 2 {
+ return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks)
+ }
+ sumType, checksum := chunks[0], chunks[1]
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil))
+
+ downloadFunc := func(di *downloadInfo) error {
+ log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)
+
+ if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
+ if c != nil {
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
+ <-c
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
+ } else {
+ log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+ }
+ } else {
+ defer s.poolRemove("pull", "img:"+img.ID)
+ tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
+ if err != nil {
+ return err
+ }
+
+ r, l, err := r.GetV2ImageBlobReader(remoteName, sumType, checksum, nil)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ io.Copy(tmpFile, utils.ProgressReader(r, int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading"))
+
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
+
+ log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
+ di.tmpFile = tmpFile
+ di.length = l
+ di.downloaded = true
+ }
+ di.imgJSON = imgJSON
+
+ return nil
+ }
+
+ if parallel {
+ downloads[i].err = make(chan error)
+ go func(di *downloadInfo) {
+ di.err <- downloadFunc(di)
+ }(&downloads[i])
+ } else {
+ err := downloadFunc(&downloads[i])
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+
+ var layersDownloaded bool
+ for i := len(downloads) - 1; i >= 0; i-- {
+ d := &downloads[i]
+ if d.err != nil {
+ err := <-d.err
+ if err != nil {
+ return false, err
+ }
+ }
+ if d.downloaded {
+ // if tmpFile is nil, the layer was downloaded and extracted elsewhere
+ if d.tmpFile != nil {
+ defer os.Remove(d.tmpFile.Name())
+ defer d.tmpFile.Close()
+ d.tmpFile.Seek(0, 0)
+ err = s.graph.Register(d.img, d.imgJSON,
+ utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting"))
+ if err != nil {
+ return false, err
+ }
+
+ // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
+ }
+ out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil))
+ layersDownloaded = true
+ } else {
+ out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil))
+ }
+
+ }
+
+ if err = s.Set(localName, tag, downloads[0].img.ID, true); err != nil {
+ return false, err
+ }
+
+ return layersDownloaded, nil
+}
diff --git a/graph/push.go b/graph/push.go
new file mode 100644
index 00000000..165b580f
--- /dev/null
+++ b/graph/push.go
@@ -0,0 +1,250 @@
+package graph
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/registry"
+ "github.com/docker/docker/utils"
+)
+
+// Retrieve all the images to be uploaded in the correct order
+func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
+ var (
+ imageList []string
+ imagesSeen = make(map[string]bool)
+ tagsByImage = make(map[string][]string)
+ )
+
+ for tag, id := range localRepo {
+ if requestedTag != "" && requestedTag
!= tag { + continue + } + var imageListForThisTag []string + + tagsByImage[id] = append(tagsByImage[id], tag) + + for img, err := s.graph.Get(id); img != nil; img, err = img.GetParent() { + if err != nil { + return nil, nil, err + } + + if imagesSeen[img.ID] { + // This image is already on the list, we can ignore it and all its parents + break + } + + imagesSeen[img.ID] = true + imageListForThisTag = append(imageListForThisTag, img.ID) + } + + // reverse the image list for this tag (so the "most"-parent image is first) + for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { + imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + log.Debugf("Image list: %v", imageList) + log.Debugf("Tags by image: %v", tagsByImage) + + return imageList, tagsByImage, nil +} + +func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error { + out = utils.NewWriteFlusher(out) + log.Debugf("Local repo: %s", localRepo) + imgList, tagsByImage, err := s.getImageList(localRepo, tag) + if err != nil { + return err + } + + out.Write(sf.FormatStatus("", "Sending image list")) + + var ( + repoData *registry.RepositoryData + imageIndex []*registry.ImgData + ) + + for _, imgId := range imgList { + if tags, exists := tagsByImage[imgId]; exists { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, &registry.ImgData{ + ID: imgId, + Tag: tag, + }) + } + } else { + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, &registry.ImgData{ + ID: imgId, + Tag: "", + }) + + } + } + + log.Debugf("Preparing to push %s with the following images and tags", localRepo) + for _, data := range imageIndex { + log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) + if err != nil { + return err + } + + nTag := 1 + if tag == "" { + nTag = len(localRepo) + } + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag)) + + for _, imgId := range imgList { + if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { + out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) + } else { + if _, err := s.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { + // FIXME: Continue on error?
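+					// A hedged aside, not current behavior: one way to "continue on
+					// error" would be to record the failure (e.g. in a hypothetical
+					// pushErrs []error declared above the loop) and move on to the
+					// remaining images, reporting the accumulated errors afterwards.
+					// As written, the first failed image aborts the whole push: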
+ return err + } + } + + for _, tag := range tagsByImage[imgId] { + out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) + + if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { + return err + } + } + } + } + + if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { + return err + } + + return nil +} + +func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { + out = utils.NewWriteFlusher(out) + jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json")) + if err != nil { + return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) + + imgData := &registry.ImgData{ + ID: imgID, + } + + // Send the json + if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { + if err == registry.ErrAlreadyExists { + out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) + return "", nil + } + return "", err + } + + layerData, err := s.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out) + if err != nil { + return "", fmt.Errorf("Failed to generate layer archive: %s", err) + } + defer os.RemoveAll(layerData.Name()) + + // Send the layer + log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) + + checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { + return "", err + } + + out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) + return imgData.Checksum, nil +} + +// FIXME: Allow to interrupt current push when new push of same image is done.
+func (s *TagStore) CmdPush(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + var ( + localName = job.Args[0] + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = &registry.AuthConfig{} + metaHeaders map[string][]string + ) + + tag := job.Getenv("tag") + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", &metaHeaders) + if _, err := s.poolAdd("push", localName); err != nil { + return job.Error(err) + } + defer s.poolRemove("push", localName) + + // Resolve the Repository name from fqn to endpoint + name + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries) + if err != nil { + return job.Error(err) + } + + img, err := s.graph.Get(localName) + r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false) + if err2 != nil { + return job.Error(err2) + } + + if err != nil { + reposLen := 1 + if tag == "" { + reposLen = len(s.Repositories[localName]) + } + job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) + // If it fails, try to get the repository + if localRepo, exists := s.Repositories[localName]; exists { + if err := s.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Error(err) + } + + var token []string + job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) + if _, err := s.pushImage(r, job.Stdout, remoteName, img.ID, endpoint.String(), token, sf); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/graph/service.go b/graph/service.go new file mode 100644 index 00000000..1be986f8 --- /dev/null +++ b/graph/service.go @@ -0,0 +1,182 @@ +package graph + +import ( + "fmt" + "io" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/log" +) + +func (s *TagStore) Install(eng *engine.Engine) error { + for name, handler := range map[string]engine.Handler{ + "image_set": s.CmdSet, + "image_tag": s.CmdTag, + "tag": s.CmdTagLegacy, // FIXME merge with "image_tag" + "image_get": s.CmdGet, + "image_inspect": s.CmdLookup, + "image_tarlayer": s.CmdTarLayer, + "image_export": s.CmdImageExport, + "history": s.CmdHistory, + "images": s.CmdImages, + "viz": s.CmdViz, + "load": s.CmdLoad, + "import": s.CmdImport, + "pull": s.CmdPull, + "push": s.CmdPush, + } { + if err := eng.Register(name, handler); err != nil { + return fmt.Errorf("Could not register %q: %v", name, err) + } + } + return nil +} + +// CmdSet stores a new image in the graph. +// Images are stored in the graph using 4 elements: +// - A user-defined ID +// - A collection of metadata describing the image +// - A directory tree stored as a tar archive (also called the "layer") +// - A reference to a "parent" ID on top of which the layer should be applied +// +// NOTE: even though the parent ID is only useful in relation to the layer and how +// to apply it (ie you could represent the full directory tree as 'parent_layer + layer'), +// it is treated as a top-level property of the image. This is an artifact of early +// design and should probably be cleaned up in the future to simplify the design.
+// +// Syntax: image_set ID +// Input: +// - Layer content must be streamed in tar format on stdin. An empty input is +// valid and represents a nil layer. +// +// - Image metadata must be passed in the command environment. +// 'json': a json-encoded object with all image metadata. +// It will be stored as-is, without any encoding/decoding artifacts. +// That is a requirement of the current registry client implementation, +// because a re-encoded json might invalidate the image checksum at +// the next upload, even with functionally identical content. +func (s *TagStore) CmdSet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + var ( + imgJSON = []byte(job.Getenv("json")) + layer = job.Stdin + ) + if len(imgJSON) == 0 { + return job.Errorf("mandatory key 'json' is not set") + } + // We have to pass an *image.Image object, even though it will be completely + // ignored in favor of the redundant json data. + // FIXME: the current prototype of Graph.Register is stupid and redundant. + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return job.Error(err) + } + if err := s.graph.Register(img, imgJSON, layer); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// CmdGet returns information about an image. +// If the image doesn't exist, an empty object is returned, to allow +// checking for an image's existence. +func (s *TagStore) CmdGet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + res := &engine.Env{} + img, err := s.LookupImage(name) + // Note: if the image doesn't exist, LookupImage returns + // nil, nil. + if err != nil { + return job.Error(err) + } + if img != nil { + // We don't directly expose all fields of the Image objects, + // to maintain a clean public API which we can maintain over + // time even if the underlying structure changes. + // We should have done this with the Image object to begin with... + // but we didn't, so now we're doing it here. + // + // Fields that we're probably better off not including: + // - Config/ContainerConfig. Those structs have the same sprawl problem, + // so we shouldn't include them wholesale either. + // - Comment: initially created to fulfill the "every image is a git commit" + // metaphor, in practice people either ignore it or use it as a + // generic description field which it isn't. On deprecation shortlist.
+ res.SetAuto("Created", img.Created) + res.Set("Author", img.Author) + res.Set("Os", img.OS) + res.Set("Architecture", img.Architecture) + res.Set("DockerVersion", img.DockerVersion) + res.Set("Id", img.ID) + res.Set("Parent", img.Parent) + } + res.WriteTo(job.Stdout) + return engine.StatusOK +} + +// CmdLookup return an image encoded in JSON +func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + if job.GetenvBool("raw") { + b, err := image.RawJson() + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", image.ID) + out.Set("Parent", image.Parent) + out.Set("Comment", image.Comment) + out.SetAuto("Created", image.Created) + out.Set("Container", image.Container) + out.SetJson("ContainerConfig", image.ContainerConfig) + out.Set("DockerVersion", image.DockerVersion) + out.Set("Author", image.Author) + out.SetJson("Config", image.Config) + out.Set("Architecture", image.Architecture) + out.Set("Os", image.OS) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + if _, err = out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} + +// CmdTarLayer return the tarLayer of the image +func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + fs, err := image.TarLayer() + if err != nil { + return job.Error(err) + } + defer fs.Close() + + written, err := io.Copy(job.Stdout, fs) + if err != nil { + return job.Error(err) + } + log.Debugf("rendered layer for %s of [%d] size", image.ID, written) + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} diff --git a/graph/tag.go b/graph/tag.go new file mode 100644 index 00000000..3d89422f --- /dev/null +++ b/graph/tag.go @@ -0,0 +1,44 @@ +package graph + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers" +) + +// CmdTag assigns a new name and tag to an existing image. If the tag already exists, +// it is changed and the image previously referenced by the tag loses that reference. +// This may cause the old image to be garbage-collected if its reference count reaches zero. +// +// Syntax: image_tag NEWNAME OLDNAME +// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0 +func (s *TagStore) CmdTag(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name) + } + var ( + newName = job.Args[0] + oldName = job.Args[1] + ) + newRepo, newTag := parsers.ParseRepositoryTag(newName) + // FIXME: Set should either parse both old and new name, or neither. + // the current prototype is inconsistent. + if err := s.Set(newRepo, newTag, oldName, true); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job. 
+func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status { + if len(job.Args) != 2 && len(job.Args) != 3 { + return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) + } + var tag string + if len(job.Args) == 3 { + tag = job.Args[2] + } + if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/graph/tags.go b/graph/tags.go new file mode 100644 index 00000000..622d6209 --- /dev/null +++ b/graph/tags.go @@ -0,0 +1,356 @@ +package graph + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/utils" +) + +const DEFAULTTAG = "latest" + +var ( + validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`) +) + +type TagStore struct { + path string + graph *Graph + mirrors []string + insecureRegistries []string + Repositories map[string]Repository + sync.Mutex + // FIXME: move push/pull-related fields + // to a helper type + pullingPool map[string]chan struct{} + pushingPool map[string]chan struct{} +} + +type Repository map[string]string + +// update Repository mapping with content of u +func (r Repository) Update(u Repository) { + for k, v := range u { + r[k] = v + } +} + +// return true if the contents of u Repository are wholly contained in r Repository +func (r Repository) Contains(u Repository) bool { + for k, v := range u { + // if u's key is not present in r OR u's key is present, but not the same value + if rv, ok := r[k]; !ok || rv != v { + return false + } + } + return true +} + +func NewTagStore(path string, graph *Graph, mirrors []string, insecureRegistries []string) (*TagStore, error) { + abspath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + store := &TagStore{ + path: abspath, + graph: graph, + mirrors: mirrors, + insecureRegistries: insecureRegistries, + Repositories: make(map[string]Repository), + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +func (store *TagStore) save() error { + // Store the json ball + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { + return err + } + return nil +} + +func (store *TagStore) reload() error { + jsonData, err := ioutil.ReadFile(store.path) + if err != nil { + return err + } + if err := json.Unmarshal(jsonData, store); err != nil { + return err + } + return nil +} + +func (store *TagStore) LookupImage(name string) (*image.Image, error) { + // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else + // (so we can pass all errors here) + repos, tag := parsers.ParseRepositoryTag(name) + if tag == "" { + tag = DEFAULTTAG + } + img, err := store.GetImage(repos, tag) + store.Lock() + defer store.Unlock() + if err != nil { + return nil, err + } else if img == nil { + if img, err = store.graph.Get(name); err != nil { + return nil, err + } + } + return img, nil +} + +// Return a reverse-lookup table of all the names which refer to each image +// Eg.
{"43b5f19b10584": {"base:latest", "base:v1"}} +func (store *TagStore) ByID() map[string][]string { + store.Lock() + defer store.Unlock() + byID := make(map[string][]string) + for repoName, repository := range store.Repositories { + for tag, id := range repository { + name := repoName + ":" + tag + if _, exists := byID[id]; !exists { + byID[id] = []string{name} + } else { + byID[id] = append(byID[id], name) + sort.Strings(byID[id]) + } + } + } + return byID +} + +func (store *TagStore) ImageName(id string) string { + if names, exists := store.ByID()[id]; exists && len(names) > 0 { + return names[0] + } + return utils.TruncateID(id) +} + +func (store *TagStore) DeleteAll(id string) error { + names, exists := store.ByID()[id] + if !exists || len(names) == 0 { + return nil + } + for _, name := range names { + if strings.Contains(name, ":") { + nameParts := strings.Split(name, ":") + if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { + return err + } + } else { + if _, err := store.Delete(name, ""); err != nil { + return err + } + } + } + return nil +} + +func (store *TagStore) Delete(repoName, tag string) (bool, error) { + store.Lock() + defer store.Unlock() + deleted := false + if err := store.reload(); err != nil { + return false, err + } + if r, exists := store.Repositories[repoName]; exists { + if tag != "" { + if _, exists2 := r[tag]; exists2 { + delete(r, tag) + if len(r) == 0 { + delete(store.Repositories, repoName) + } + deleted = true + } else { + return false, fmt.Errorf("No such tag: %s:%s", repoName, tag) + } + } else { + delete(store.Repositories, repoName) + deleted = true + } + } else { + return false, fmt.Errorf("No such repository: %s", repoName) + } + return deleted, store.save() +} + +func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { + img, err := store.LookupImage(imageName) + store.Lock() + defer store.Unlock() + if err != nil { + return err + } + if tag == "" { + tag = DEFAULTTAG + } + if err := validateRepoName(repoName); err != nil { + return err + } + if err := ValidateTagName(tag); err != nil { + return err + } + if err := store.reload(); err != nil { + return err + } + var repo Repository + if r, exists := store.Repositories[repoName]; exists { + repo = r + } else { + repo = make(map[string]string) + if old, exists := store.Repositories[repoName]; exists && !force { + return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old) + } + store.Repositories[repoName] = repo + } + repo[tag] = img.ID + return store.save() +} + +func (store *TagStore) Get(repoName string) (Repository, error) { + store.Lock() + defer store.Unlock() + if err := store.reload(); err != nil { + return nil, err + } + if r, exists := store.Repositories[repoName]; exists { + return r, nil + } + return nil, nil +} + +func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) { + repo, err := store.Get(repoName) + store.Lock() + defer store.Unlock() + if err != nil { + return nil, err + } else if repo == nil { + return nil, nil + } + if revision, exists := repo[tagOrID]; exists { + return store.graph.Get(revision) + } + // If no matching tag is found, search through images for a matching image id + for _, revision := range repo { + if strings.HasPrefix(revision, tagOrID) { + return store.graph.Get(revision) + } + } + return nil, nil +} + +func (store *TagStore) GetRepoRefs() map[string][]string { + store.Lock() + reporefs := make(map[string][]string) + + for name, repository := range store.Repositories { + for 
tag, id := range repository { + shortID := utils.TruncateID(id) + reporefs[shortID] = append(reporefs[shortID], fmt.Sprintf("%s:%s", name, tag)) + } + } + store.Unlock() + return reporefs +} + +// isOfficialName returns whether a repo name is considered an official +// repository. Official repositories are repos with names within +// the library namespace or which default to the library namespace +// by not providing one. +func isOfficialName(name string) bool { + if strings.HasPrefix(name, "library/") { + return true + } + if strings.IndexRune(name, '/') == -1 { + return true + } + return false +} + +// Validate the name of a repository +func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + return nil +} + +// Validate the name of a tag +func ValidateTagName(name string) error { + if name == "" { + return fmt.Errorf("Tag name can't be empty") + } + if !validTagName.MatchString(name) { + return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name) + } + return nil +} + +func (store *TagStore) poolAdd(kind, key string) (chan struct{}, error) { + store.Lock() + defer store.Unlock() + + if c, exists := store.pullingPool[key]; exists { + return c, fmt.Errorf("pull %s is already in progress", key) + } + if c, exists := store.pushingPool[key]; exists { + return c, fmt.Errorf("push %s is already in progress", key) + } + + c := make(chan struct{}) + switch kind { + case "pull": + store.pullingPool[key] = c + case "push": + store.pushingPool[key] = c + default: + return nil, fmt.Errorf("Unknown pool type") + } + return c, nil +} + +func (store *TagStore) poolRemove(kind, key string) error { + store.Lock() + defer store.Unlock() + switch kind { + case "pull": + if c, exists := store.pullingPool[key]; exists { + close(c) + delete(store.pullingPool, key) + } + case "push": + if c, exists := store.pushingPool[key]; exists { + close(c) + delete(store.pushingPool, key) + } + default: + return fmt.Errorf("Unknown pool type") + } + return nil +} diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go new file mode 100644 index 00000000..bf94deb4 --- /dev/null +++ b/graph/tags_unit_test.go @@ -0,0 +1,150 @@ +package graph + +import ( + "bytes" + "io" + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/docker/docker/image" + "github.com/docker/docker/utils" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +const ( + testImageName = "myapp" + testImageID = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234" +) + +func fakeTar() (io.Reader, error) { + uid := os.Getuid() + gid := os.Getgid() + + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + + // Leaving these fields blank requires root privileges + hdr.Uid = uid + hdr.Gid = gid + + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return buf, nil +} + +func mkTestTagStore(root string, t *testing.T) *TagStore { + driver, err := graphdriver.New(root, nil) + if err != nil { + t.Fatal(err) + } + graph, err := NewGraph(root, driver) + if err != nil {
t.Fatal(err) + } + store, err := NewTagStore(path.Join(root, "tags"), graph, nil, nil) + if err != nil { + t.Fatal(err) + } + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &image.Image{ID: testImageID} + if err := graph.Register(img, nil, archive); err != nil { + t.Fatal(err) + } + if err := store.Set(testImageName, "", testImageID, false); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + defer store.graph.driver.Cleanup() + + if img, err := store.LookupImage(testImageName); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage("fail:fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage(testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } +} + +func TestValidTagName(t *testing.T) { + validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err != nil { + t.Errorf("'%s' should've been a valid tag", tag) + } + } +} + +func TestInvalidTagName(t *testing.T) { + invalidTags := []string{"-9", ".foo", "-test", ".", "-"} + for _, tag := range invalidTags { + if err := ValidateTagName(tag); err == nil { + t.Errorf("'%s' shouldn't have been a valid tag", tag) + } + } +} + +func TestOfficialName(t *testing.T) { + names := map[string]bool{ + "library/ubuntu": true, + "nonlibrary/ubuntu": false, + "ubuntu": true, + "other/library": false, + } + for name, isOfficial := range names { + result := isOfficialName(name) + if result != isOfficial { + t.Errorf("Unexpected result for %s\n\tExpecting: %v\n\tActual: %v", name, isOfficial, result) + } + } +} diff --git a/graph/viz.go b/graph/viz.go new file mode 100644 index 00000000..924c22b6 --- /dev/null +++ b/graph/viz.go @@ -0,0 +1,38 @@ +package graph + +import ( + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" +) + +func (s *TagStore) CmdViz(job *engine.Job) engine.Status { + images, _ := s.graph.Map() + if images == nil { + return engine.StatusOK + } + job.Stdout.Write([]byte("digraph docker {\n")) + + var ( + parentImage *image.Image + err error + ) + for _, image := range images { + parentImage, err = image.GetParent() + if err != nil { + return job.Errorf("Error while getting parent image: %v", err) + } + if parentImage != nil { + job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) + } else { + job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) + } + } + + for id, repos := range s.GetRepoRefs() { + job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") +
"\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) + } + job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) + return engine.StatusOK +} diff --git a/hack/CONTRIBUTORS.md b/hack/CONTRIBUTORS.md new file mode 120000 index 00000000..44fcc634 --- /dev/null +++ b/hack/CONTRIBUTORS.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/hack/MAINTAINERS b/hack/MAINTAINERS new file mode 100644 index 00000000..15e4433e --- /dev/null +++ b/hack/MAINTAINERS @@ -0,0 +1,4 @@ +Tianon Gravi (@tianon) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) +dind: Jerome Petazzoni (@jpetazzo) diff --git a/hack/MAINTAINERS.md b/hack/MAINTAINERS.md new file mode 100644 index 00000000..0a4cd144 --- /dev/null +++ b/hack/MAINTAINERS.md @@ -0,0 +1,130 @@ +# The Docker Maintainer manual + +## Introduction + +Dear maintainer. Thank you for investing the time and energy to help +make Docker as useful as possible. Maintaining a project is difficult, +sometimes unrewarding work. Sure, you will get to contribute cool +features to the project. But most of your time will be spent reviewing, +cleaning up, documenting, answering questions, and justifying design +decisions - while everyone has all the fun! But remember - the quality +of the maintainers' work is what distinguishes the good projects from +the great. So please be proud of your work, even the unglamourous parts, +and encourage a culture of appreciation and respect for *every* aspect +of improving the project - not just the hot new features. + +This document is a manual for maintainers old and new. It explains what +is expected of maintainers, how they should work, and what tools are +available to them. + +This is a living document - if you see something out of date or missing, +speak up! + +## What is a maintainer's responsibility? + +It is every maintainer's responsibility to: + +1. Expose a clear road map for improving their component. +2. Deliver prompt feedback and decisions on pull requests. +3. Be available to anyone with questions, bug reports, criticism etc. + on their component. This includes IRC, GitHub requests and the mailing + list. +4. Make sure their component respects the philosophy, design and + road map of the project. + +## How are decisions made? + +Short answer: with pull requests to the Docker repository. + +Docker is an open-source project with an open design philosophy. This +means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, road map, and APIs. *If it's +part of the project, it's in the repo. If it's in the repo, it's part of +the project.* + +As a result, all decisions can be expressed as changes to the +repository. An implementation change is a change to the source code. An +API change is a change to the API specification. A philosophy change is +a change to the philosophy manifesto, and so on. + +All decisions affecting Docker, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do +this (see below "Who decides what?") + + Accepting pull requests + - If the pull request appears to be ready to merge, give it a `LGTM`, which + stands for "Looks Good To Me". + - If the pull request has some small problems that need to be changed, make + a comment adressing the issues. 
+ - If the changes needed to a PR are small, you can add a "LGTM once the + following comments are addressed..."; this will reduce needless back and + forth. + - If the PR only needs a few changes before being merged, any MAINTAINER can + make a replacement PR that incorporates the existing commits and fixes the + problems before a fast track merge. + + Closing pull requests + - If a PR appears to be abandoned, after having attempted to contact the + original contributor, then a replacement PR may be made. Once the + replacement PR is made, any contributor may close the original one. + - If you are not sure if the pull request implements a good feature or you + do not understand the purpose of the PR, ask the contributor to provide + more documentation. If the contributor is not able to adequately explain + the purpose of the PR, the PR may be closed by any MAINTAINER. + - If a MAINTAINER feels that the pull request is sufficiently architecturally + flawed, or if the pull request needs significantly more design discussion + before being considered, the MAINTAINER should close the pull request with + a short explanation of what discussion still needs to be had. It is + important not to leave such pull requests open, as this will waste both the + MAINTAINER's time and the contributor's time. It is not good to string a + contributor along for weeks or months, having them make many changes to a PR + that will eventually be rejected. + +## Who decides what? + +All decisions are pull requests, and the relevant maintainers make +decisions by accepting or refusing pull requests. Review and acceptance +by anyone is denoted by adding a comment in the pull request: `LGTM`. +However, only currently listed `MAINTAINERS` are counted towards the +required majority. + +Docker follows the timeless, highly efficient and totally unfair system +known as [Benevolent dictator for +life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with +yours truly, Solomon Hykes, in the role of BDFL. This means that all +decisions are made, by default, by Solomon. Since making every decision +myself would be highly un-scalable, in practice decisions are spread +across multiple maintainers. + +The relevant maintainers for a pull request can be worked out in 2 steps: + +* Step 1: Determine the subdirectories affected by the pull request. This + might be `src/registry`, `docs/source/api`, or any other part of the repo. + +* Step 2: Find the `MAINTAINERS` file which affects this directory. If the + directory itself does not have a `MAINTAINERS` file, work your way up + the repo hierarchy until you find one. + +There is also a `hack/getmaintainers.sh` script that will print out the +maintainers for a specified directory. + +### I'm a maintainer, and I'm going on holiday + +Please let your co-maintainers and other contributors know by raising a pull +request that comments out your `MAINTAINERS` file entry using a `#`. + +### I'm a maintainer. Should I make pull requests too? + +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. + +### Who assigns maintainers? + +Solomon has final `LGTM` approval for all pull requests to `MAINTAINERS` files. + +### How is this process changed?
+ +Just like everything else: by making a pull request :) diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md new file mode 100644 index 00000000..265f7d67 --- /dev/null +++ b/hack/PACKAGERS.md @@ -0,0 +1,329 @@ +# Dear Packager, + +If you are looking to make Docker available on your favorite software +distribution, this document is for you. It summarizes the requirements for +building and running the Docker client and the Docker daemon. + +## Getting Started + +We want to help you package Docker successfully. Before doing any packaging, a +good first step is to introduce yourself on the [docker-dev mailing +list](https://groups.google.com/d/forum/docker-dev), explain what you're trying +to achieve, and tell us how we can help. Don't worry, we don't bite! There might +even be someone already working on packaging for the same distro! + +You can also join the IRC channel - #docker and #docker-dev on Freenode are both +active and friendly. + +We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our +"Packagers Relations", since he's always working to make sure our packagers have +a good, healthy upstream to work with (both in our communication and in our +build scripts). If you're having any kind of trouble, feel free to ping him +directly. He also likes to keep track of what distributions we have packagers +for, so feel free to reach out to him even just to say "Hi!" + +## Package Name + +If possible, your package should be called "docker". If that name is already +taken, a second choice is "lxc-docker", but with the caveat that "LXC" is now an +optional dependency (as noted below). Another possible choice is "docker.io". + +## Official Build vs Distro Build + +The Docker project maintains its own build and release toolchain. It is pretty +neat and entirely based on Docker (surprise!). This toolchain is the canonical +way to build Docker. We encourage you to give it a try, and if the circumstances +allow you to use it, we recommend that you do. + +You might not be able to use the official build toolchain - usually because your +distribution has a toolchain and packaging policy of its own. We get it! Your +house, your rules. The rest of this document should give you the information you +need to package Docker your way, without denaturing it in the process. + +## Build Dependencies + +To build Docker, you will need the following: + +* A recent version of git and mercurial +* Go version 1.3 or later +* A clean checkout of the source added to a valid [Go + workspace](http://golang.org/doc/code.html#Workspaces) under the path + *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, + explained in more detail below). + +To build the Docker daemon, you will additionally need: + +* An amd64/x86_64 machine running Linux +* SQLite version 3.7.9 or later +* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version + 2.02.89 or later +* btrfs-progs version 3.8 or later (including commit e5cb128 from 2013-01-07) + for the necessary btrfs headers + +Be sure to also check out Docker's Dockerfile for the most up-to-date list of +these build-time dependencies. + +### Go Dependencies + +All Go dependencies are vendored under "./vendor". They are used by the official +build, so the source of truth for the current version of each dependency is +whatever is in "./vendor". + +To use the vendored dependencies, simply make sure the path to "./vendor" is +included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). 
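+
+For example, a minimal sketch of doing this by hand (the workspace path here is
+purely illustrative; substitute whatever layout your packaging uses):
+
+```bash
+# Assumes the Docker source checkout is the current working directory and is
+# already present in the workspace under src/github.com/docker/docker.
+export GOPATH="/path/to/workspace:$(pwd)/vendor"
+```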
+ +If you would rather (or must, due to distro policy) package these dependencies +yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the +exact version for each. + +NOTE: if you're not able to package the exact version (to the exact commit) of a +given dependency, please get in touch so we can remediate! Who knows what +discrepancies can be caused by even the slightest deviation. We promise to do +our best to make everybody happy. + +## Stripping Binaries + +Please, please, please do not strip any compiled binaries. This is really +important. + +In our own testing, stripping the resulting binaries sometimes results in a +binary that appears to work, but more often causes random panics, segfaults, and +other issues. Even if the binary appears to work, please don't strip. + +See the following quotes from Dave Cheney, which explain this position better +from the upstream Golang perspective. + +### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) + +> Super super important: Do not strip go binaries or archives. It isn't tested, +> often breaks, and doesn't work. + +### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) + +> To quote myself: "Please do not strip Go binaries, it is not supported, not +> tested, is often broken, and doesn't do what you want" +> +> To unpack that a bit +> +> * not supported, as in, we don't support it, and recommend against it when +> asked +> * not tested, we don't test stripped binaries as part of the build CI process +> * is often broken, stripping a go binary will produce anywhere from no, to +> subtle, to outright execution failure, see above + +### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) + +> To clarify my previous statements. +> +> * I do not disagree with the debian policy, it is there for a good reason +> * Having said that, it stripping Go binaries doesn't work, and nobody is +> looking at making it work, so there is that. +> +> Thanks for patching the build formula. + +## Building Docker + +Please use our build script ("./hack/make.sh") for all your compilation of +Docker. If there's something you need that it isn't doing, or something it could +be doing to make your life as a packager easier, please get in touch with Tianon +and help us rectify the situation. Chances are good that other packagers have +probably run into the same problems and a fix might already be in the works, but +none of us will know for sure unless you harass Tianon about it. :) + +All the commands listed within this section should be run with the Docker source +checkout as the current working directory. + +### `AUTO_GOPATH` + +If you'd rather not be bothered with the hassles that setting up `GOPATH` +appropriately can be, and prefer to just get a "build that works", you should +add something similar to this to whatever script or process you're using to +build Docker: + +```bash +export AUTO_GOPATH=1 +``` + +This will cause the build scripts to set up a reasonable `GOPATH` that +automatically and properly includes both docker/docker from the local +directory, and the local "./vendor" directory as necessary. 
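+
+Under the hood, this is roughly equivalent to the following sketch (see
+"./hack/make.sh" for the authoritative logic; the ".gopath" layout is an
+implementation detail and may change):
+
+```bash
+# Link the checkout into a throwaway workspace and point GOPATH at it,
+# plus the vendored dependencies.
+mkdir -p .gopath/src/github.com/docker
+ln -sf "$(pwd)" .gopath/src/github.com/docker/docker
+export GOPATH="$(pwd)/.gopath:$(pwd)/vendor"
+```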
+ +### `DOCKER_BUILDTAGS` + +If you're building a binary that may need to be used on platforms that include +AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: +```bash +export DOCKER_BUILDTAGS='apparmor' +``` + +If you're building a binary that may need to be used on platforms that include +SELinux, you will need to use the `selinux` build tag: +```bash +export DOCKER_BUILDTAGS='selinux' +``` + +There are build tags for disabling graphdrivers as well. By default, support +for all graphdrivers is built in. + +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space-separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". + +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): + +```bash +./hack/make.sh binary +``` + +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". + +### Dynamic Daemon / Client-only Binary + +If you are only interested in a Docker client binary, set `DOCKER_CLIENTONLY` to a non-empty value (which will prevent the extra step of compiling dockerinit) using something similar to the following: + +```bash +export DOCKER_CLIENTONLY=1 +``` + +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: + +```bash +./hack/make.sh dynbinary +``` + +This will create "./bundles/$VERSION/dynbinary/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. + +For daemon builds, you will also need to grab and install +"./bundles/$VERSION/dynbinary/dockerinit-$VERSION", which is created from the +minimal set of Docker's codebase that _must_ be compiled statically (and is thus +a pure static binary).
The acceptable locations Docker will search for this file +are as follows (in order): + +* as "dockerinit" in the same directory as the daemon binary (ie, if docker is + installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first + place this file is searched for) +* "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" + ([FHS 3.0 Draft](http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) +* "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS + 2.3](http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) + +If (and please, only if) one of the paths above is insufficient due to distro +policy or similar issues, you may use the `DOCKER_INITPATH` environment variable +at compile-time as follows to set a different path for Docker to search: + +```bash +export DOCKER_INITPATH=/usr/lib/docker.io/dockerinit +``` + +If you find yourself needing this, please don't hesitate to reach out to Tianon +to see if it would be reasonable or helpful to add more paths to Docker's list, +especially if there's a relevant standard worth referencing (such as the FHS). + +Also, it goes without saying, but for the purposes of the daemon please consider +these two binaries ("docker" and "dockerinit") as if they were a single unit. +Mixing and matching can cause undesired consequences, and will fail to run +properly. + +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. + +### Optional Dependencies + +Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. A few examples include: + +* LXC execution driver (requires version 1.0 or later of the LXC utility scripts) +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. 
Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: + +```bash +docker -d +``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/hack/PRINCIPLES.md b/hack/PRINCIPLES.md new file mode 100644 index 00000000..e59c1675 --- /dev/null +++ b/hack/PRINCIPLES.md @@ -0,0 +1,19 @@ +# Docker principles + +In the design and development of Docker we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between 2 options, choose the one that is easier to reverse. +* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The fewer moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/hack/README.md b/hack/README.md new file mode 100644 index 00000000..207a2aa6 --- /dev/null +++ b/hack/README.md @@ -0,0 +1,24 @@ +# Hacking on Docker + +The hack/ directory holds information and tools for everyone involved in the process of creating and +distributing Docker, specifically: + +## Guides + +If you're a *contributor* or aspiring contributor, you should read CONTRIBUTORS.md. + +If you're a *maintainer* or aspiring maintainer, you should read MAINTAINERS.md. + +If you're a *packager* or aspiring packager, you should read PACKAGERS.md. + +If you're a maintainer in charge of a *release*, you should read RELEASE-CHECKLIST.md. + +## Roadmap + +A high-level roadmap is available at ROADMAP.md. + + +## Build tools + +make.sh is the primary build tool for docker. It is used for compiling the official binary, +running the test suite, and pushing releases. diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md new file mode 100644 index 00000000..250a2521 --- /dev/null +++ b/hack/RELEASE-CHECKLIST.md @@ -0,0 +1,303 @@ +# Release Checklist +## A maintainer's guide to releasing Docker + +So you're in charge of a Docker release? Cool. Here's what to do. + +If your experience deviates from this document, please document the changes +to keep it up-to-date.
+ +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/docker/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +export GITHUBUSER="YOUR_GITHUB_USER" +git remote add origin https://github.com/docker/docker.git +git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git +``` + +### 1. Pull from master and create a release branch + +Note: Even for major releases, all of X, Y and Z in vX.Y.Z must be specified (e.g. v1.0.0). + +```bash +export VERSION=vX.Y.Z +git fetch origin +git branch -D release || true +git checkout --track origin/release +git checkout -b bump_$VERSION +``` + +If it's a regular release, we usually merge master. +```bash +git merge origin/master +``` + +Otherwise, if it is a hotfix release, we cherry-pick only the commits we want. +```bash +# get the commit ids we want to cherry-pick +git log +# cherry-pick the commits starting from the oldest one, without including merge commits +git cherry-pick <commit> +git cherry-pick <commit> +... +``` + +### 2. Update CHANGELOG.md + +You can run this command for reference with git 2.0: + +```bash +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since <last release> +* New docker command to do something useful +* Remote API change (deprecating old version) +* Performance improvements in some use cases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. +Each change should be listed under a category heading formatted as `#### CATEGORY`. + +`CATEGORY` should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Remote API + * Runtime + * Other (please use this category sparingly) + +Each change should be formatted as `BULLET DESCRIPTION`, given: + +* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or + upgrade, respectively. + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "Add new feature X which allows Y", + "Fix bug which caused X", "Increase performance of Y". + +EXAMPLES: + +```markdown +## 0.3.6 (1995-12-25) + +#### Builder + ++ 'docker build -t FOO .'
applies the tag FOO to the newly built image + +#### Remote API + +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version +``` + +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. If you just need +a count, add a simple `| wc -l`. + +### 3. Change the contents of the VERSION file + +```bash +echo ${VERSION#v} > VERSION +``` + +### 4. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: + +```bash +make docs +``` + +To make a shared test at http://beta-docs.docker.io: + +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` + +### 5. Commit and create a pull request to the "release" branch + +```bash +git add VERSION CHANGELOG.md +git commit -m "Bump version to $VERSION" +git push $GITHUBUSER bump_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:release...$GITHUBUSER:bump_$VERSION?expand=1" +``` + +That last command will give you the proper link to visit to ensure that you +open the PR against the "release" branch instead of accidentally against +"master" (like so many brave souls before you already have). + +### 6. Get 2 other maintainers to validate the pull request + +### 7. Publish binaries + +To run this you will need access to the release credentials. Get them from the Core maintainers. + +Replace "..." with the respective credentials: + +```bash +docker build -t docker . +docker run \ + -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY="..." \ + -e AWS_SECRET_KEY="..." \ + -e GPG_PASSPHRASE="..." \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +It will run the test suite, build the binaries and packages, +and upload to the specified bucket (you should use test.docker.com for +general testing, and once everything is fine, switch to get.docker.com as +noted below). + +After the binaries and packages are uploaded to test.docker.com, make sure +they get tested in both Ubuntu and Debian for any obvious installation +issues or runtime issues. + +Announcing on IRC in both `#docker` and `#docker-dev` is a great way to get +help testing! An easy way to get some useful links for sharing: + +```bash +echo "Ubuntu/Debian: https://test.docker.com/ubuntu or curl -sSL https://test.docker.com/ | sh" +echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 32bit client binary: https://test.docker.com/builds/Darwin/i386/docker-${VERSION#v}" +echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" +``` + +Once they're tested and reasonably believed to be working, run against +get.docker.com: + +```bash +docker run \ + -e AWS_S3_BUCKET=get.docker.com \ + -e AWS_ACCESS_KEY="..." \ + -e AWS_SECRET_KEY="..." \ + -e GPG_PASSPHRASE="..." \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +### 8. Breakathon + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. 
+
+Announcing on IRC in both `#docker` and `#docker-dev` is a great way to get
+help testing! An easy way to get some useful links for sharing:
+
+```bash
+echo "Ubuntu/Debian: https://test.docker.com/ubuntu or curl -sSL https://test.docker.com/ | sh"
+echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}"
+echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}"
+echo "Darwin/OSX 32bit client binary: https://test.docker.com/builds/Darwin/i386/docker-${VERSION#v}"
+echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz"
+```
+
+Once they're tested and reasonably believed to be working, run against
+get.docker.com:
+
+```bash
+docker run \
+    -e AWS_S3_BUCKET=get.docker.com \
+    -e AWS_ACCESS_KEY="..." \
+    -e AWS_SECRET_KEY="..." \
+    -e GPG_PASSPHRASE="..." \
+    -i -t --privileged \
+    docker \
+    hack/release.sh
+```
+
+### 8. Breakathon
+
+Spend several days, along with the community, explicitly investing time and
+resources to try and break Docker in every possible way, documenting any
+findings pertinent to the release. This time should be spent testing, not
+coding: look for ways in which the release might break existing features or
+upgrade environments. During this time, the release is in code freeze, and any
+additional code changes will be pushed out to the next release.
+
+It should include various levels of breaking Docker, beyond just using Docker
+by the book.
+
+Any issues found may still remain issues for this release, but they should be
+documented and given appropriate warnings.
+
+### 9. Apply tag
+
+It's very important that we don't make the tag until after the official
+release is uploaded to get.docker.com!
+
+```bash
+git tag -a $VERSION -m $VERSION bump_$VERSION
+git push origin $VERSION
+```
+
+### 10. Go to github to merge the `bump_$VERSION` branch into release
+
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
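+
+If you prefer the command line to the web UI, the equivalent merge and branch
+cleanup can be done with something like the following (a sketch only, assuming
+you have push access to the "origin" remote; the GitHub button remains the
+usual route):
+
+```bash
+# merge the bump branch into release, then delete the leftover branch
+git fetch origin
+git checkout release
+git reset --hard origin/release
+git merge --no-ff bump_$VERSION
+git push origin release
+git push $GITHUBUSER :bump_$VERSION
+```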
+
+### 11. Update the docs branch
+
+If this is a MAJOR.MINOR.0 release, you need to make a branch for the previous
+release's documentation:
+
+```bash
+git checkout -b docs-$PREVIOUS_MAJOR_MINOR docs
+git fetch
+git reset --hard origin/docs
+git push -f origin docs-$PREVIOUS_MAJOR_MINOR
+```
+
+You will need an `awsconfig` file in the `docs/` directory containing the
+S3 credentials for the bucket you are deploying to.
+
+```bash
+git checkout -b docs release || git checkout docs
+git fetch
+git reset --hard origin/release
+git push -f origin docs
+make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release
+```
+
+The docs will appear on http://docs.docker.com/ (though there may be cached
+versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
+For more information about documentation releases, see `docs/README.md`.
+
+Ask Sven or JohnC to invalidate the CloudFront cache using the CDN Planet Chrome applet.
+
+### 12. Create a new pull request to merge release back into master
+
+```bash
+git checkout master
+git fetch
+git reset --hard origin/master
+git merge origin/release
+git checkout -b merge_release_$VERSION
+echo ${VERSION#v}-dev > VERSION
+git add VERSION
+git commit -m "Change version to $(cat VERSION)"
+git push $GITHUBUSER merge_release_$VERSION
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
+```
+
+Again, get two maintainers to validate, then merge, then push that pretty
+blue button to delete your branch.
+
+### 13. Rejoice and Evangelize!
+
+Congratulations! You're done.
+
+Go forth and announce the glad tidings of the new release in `#docker`,
+`#docker-dev`, on the [mailing list](https://groups.google.com/forum/#!forum/docker-dev),
+and on Twitter!
diff --git a/hack/ROADMAP.md b/hack/ROADMAP.md
new file mode 100644
index 00000000..d49664b7
--- /dev/null
+++ b/hack/ROADMAP.md
@@ -0,0 +1,41 @@
+# Docker: what's next?
+
+This document is a high-level overview of where we want to take Docker next.
+It is a curated selection of planned improvements which are either important, difficult, or both.
+
+For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/docker/issues).
+
+To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
+
+
+## Container wiring and service discovery
+
+In its current version, docker doesn't make it very easy to manipulate multiple
+containers as a cohesive group (i.e. orchestration), and it doesn't make it
+seamless for containers to connect to each other as network services (i.e. wiring).
+
+To achieve wiring and orchestration with docker today, you need to write glue
+scripts yourself, or use one of several companion tools available, like
+Orchestra, Shipper, Deis, Pipeworks, etc.
+
+We want the Docker API to support orchestration and wiring natively, so that
+these tools can cleanly and seamlessly integrate into the Docker user
+experience, and remain interoperable with each other.
+
+
+## Better integration with process supervisors
+
+For docker to be fully usable in production, it needs to cleanly integrate
+with the host machine's process supervisor of choice. Whether it's sysV-init,
+upstart, systemd, runit or supervisord, we want to make sure docker plays nice
+with your existing system. This will be a major focus of the 0.7 release.
+
+
+## Plugin API
+
+We want Docker to run everywhere, and to integrate with every devops tool.
+Those are ambitious goals, and the only way to reach them is with the Docker
+community. For the community to participate fully, we need an API which allows
+Docker to be deeply and easily customized.
+
+We are working on a plugin API which will make Docker very, very
+customization-friendly. We believe it will facilitate the integrations listed
+above - and many more we didn't even think about.
+
+
+## Broader kernel support
+
+Our goal is to make Docker run everywhere, but currently Docker requires Linux
+version 3.8 or higher with cgroups support. If you're deploying new machines
+for the purpose of running Docker, this is a fairly easy requirement to meet.
+However, if you're adding Docker to an existing deployment, you may not have
+the flexibility to update and patch the kernel.
+
+Expanding Docker's kernel support is a priority. This includes running on older
+kernel versions, specifically focusing on versions already popular in server
+deployments such as those used by RHEL and the OpenVZ stack.
+
+
+## Cross-architecture support
+
+Our goal is to make Docker run everywhere. However currently Docker only runs
+on x86_64 systems. We plan on expanding architecture support, so that Docker
+containers can be created and used on more architectures.
diff --git a/hack/allmaintainers.sh b/hack/allmaintainers.sh
new file mode 100755
index 00000000..1ea5a9f7
--- /dev/null
+++ b/hack/allmaintainers.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
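+# Print the de-duplicated union of every MAINTAINERS file under the given path,
+# stripping "filename:" overrides, blank lines and comments.
+# Example (run from the repository root): ./hack/allmaintainers.sh .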
+find $1 -name MAINTAINERS -exec cat {} ';' | sed -E -e 's/^[^:]*: *(.*)$/\1/' | grep -E -v -e '^ *$' -e '^ *#.*$' | sort -u
diff --git a/hack/dind b/hack/dind
new file mode 100755
index 00000000..f8fae637
--- /dev/null
+++ b/hack/dind
@@ -0,0 +1,88 @@
+#!/bin/bash
+set -e
+
+# DinD: a wrapper script which allows docker to be run inside a docker container.
+# Original version by Jerome Petazzoni
+# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/
+#
+# This script should be executed inside a docker container in privileged mode
+# ('docker run --privileged', introduced in docker 0.6).
+
+# Usage: dind CMD [ARG...]
+
+# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
+export container=docker
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/cgroup
+
+mkdir -p "$CGROUP"
+
+if ! mountpoint -q "$CGROUP"; then
+	mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
+		echo >&2 'Could not make a tmpfs mount. Did you use --privileged?'
+		exit 1
+	}
+fi
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
+	mount -t securityfs none /sys/kernel/security || {
+		echo >&2 'Could not mount /sys/kernel/security.'
+		echo >&2 'AppArmor detection and -privileged mode might break.'
+	}
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do
+	mkdir -p "$CGROUP/$SUBSYS"
+	if ! mountpoint -q $CGROUP/$SUBSYS; then
+		mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS"
+	fi
+
+	# The two following sections address a bug which manifests itself
+	# by a cryptic "lxc-start: no ns_cgroup option specified" when
+	# trying to start containers within a container.
+	# The bug seems to appear when the cgroup hierarchies are not
+	# mounted on the exact same directories in the host, and in the
+	# container.
+
+	# Named, control-less cgroups are mounted with "-o name=foo"
+	# (and appear as such under /proc/<pid>/cgroup) but are usually
+	# mounted on a directory named "foo" (without the "name=" prefix).
+	# Systemd and OpenRC (and possibly others) both create such a
+	# cgroup. To avoid the aforementioned bug, we symlink "foo" to
+	# "name=foo". This shouldn't have any adverse effect.
+	name="${SUBSYS#name=}"
+	if [ "$name" != "$SUBSYS" ]; then
+		ln -s "$SUBSYS" "$CGROUP/$name"
+	fi
+
+	# Likewise, on at least one system, it has been reported that
+	# systemd would mount the CPU and CPU accounting controllers
+	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+	# but on a directory called "cpu,cpuacct" (note the inversion
+	# in the order of the groups). This tries to work around it.
+	if [ "$SUBSYS" = 'cpuacct,cpu' ]; then
+		ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct"
+	fi
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+if ! grep -q :devices: /proc/1/cgroup; then
+	echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.'
+fi
+if ! grep -qw devices /proc/1/cgroup; then
+	echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.'
+fi
+
+# Mount /tmp
+mount -t tmpfs none /tmp
+
+if [ $# -gt 0 ]; then
+	exec "$@"
+fi
+
+echo >&2 'ERROR: No command specified.'
+echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
diff --git a/hack/generate-authors.sh b/hack/generate-authors.sh
new file mode 100755
index 00000000..83f61df3
--- /dev/null
+++ b/hack/generate-authors.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | sort -uf
+} > AUTHORS
diff --git a/hack/getmaintainer.sh b/hack/getmaintainer.sh
new file mode 100755
index 00000000..ca532d42
--- /dev/null
+++ b/hack/getmaintainer.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+set -e
+
+if [ $# -ne 1 ]; then
+	echo >&2 "Usage: $0 PATH"
+	echo >&2 "Show the primary and secondary maintainers for a given path"
+	exit 1
+fi
+
+DEST=$1
+DESTFILE=""
+if [ ! -d $DEST ]; then
+	DESTFILE=$(basename $DEST)
+	DEST=$(dirname $DEST)
+fi
+
+MAINTAINERS=()
+cd $DEST
+while true; do
+	if [ -e ./MAINTAINERS ]; then
+		{
+			while read line; do
+				re='^([^:]*): *(.*)$'
+				file=$(echo $line | sed -E -n "s/$re/\1/p")
+				if [ ! 
-z "$file" ]; then + if [ "$file" = "$DESTFILE" ]; then + echo "Override: $line" + maintainer=$(echo $line | sed -E -n "s/$re/\2/p") + MAINTAINERS=("$maintainer" "${MAINTAINERS[@]}") + fi + else + MAINTAINERS+=("$line"); + fi + done; + } < MAINTAINERS + break + fi + if [ -d .git ]; then + break + fi + if [ "$(pwd)" = "/" ]; then + break + fi + cd .. +done + +PRIMARY="${MAINTAINERS[0]}" +PRIMARY_FIRSTNAME=$(echo $PRIMARY | cut -d' ' -f1) +LGTM_COUNT=${#MAINTAINERS[@]} +LGTM_COUNT=$((LGTM_COUNT%2 +1)) + +firstname() { + echo $1 | cut -d' ' -f1 +} + +echo "A pull request in $1 will need $LGTM_COUNT LGTM's to be merged." +echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1." +for SECONDARY in "${MAINTAINERS[@]:1}"; do + echo "--- $SECONDARY" +done diff --git a/hack/install.sh b/hack/install.sh new file mode 100755 index 00000000..9652e467 --- /dev/null +++ b/hack/install.sh @@ -0,0 +1,214 @@ +#!/bin/sh +set -e +# +# This script is meant for quick & easy install via: +# 'curl -sSL https://get.docker.com/ | sh' +# or: +# 'wget -qO- https://get.docker.com/ | sh' +# +# +# Docker Maintainers: +# To update this script on https://get.docker.com, +# use hack/release.sh during a normal release, +# or the following one-liner for script hotfixes: +# s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index +# + +url='https://get.docker.com/' + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +case "$(uname -m)" in + *64) + ;; + *) + echo >&2 'Error: you are not using a 64bit platform.' + echo >&2 'Docker currently only supports 64bit platforms.' + exit 1 + ;; +esac + +if command_exists docker || command_exists lxc-docker; then + echo >&2 'Warning: "docker" or "lxc-docker" command appears to already exist.' + echo >&2 'Please ensure that you do not already have docker installed.' + echo >&2 'You may press Ctrl+C now to abort this process and rectify this situation.' + ( set -x; sleep 20 ) +fi + +user="$(id -un 2>/dev/null || true)" + +sh_c='sh -c' +if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + echo >&2 'Error: this installer needs the ability to run commands as root.' + echo >&2 'We are unable to find either "sudo" or "su" available to make this happen.' + exit 1 + fi +fi + +curl='' +if command_exists curl; then + curl='curl -sSL' +elif command_exists wget; then + curl='wget -qO-' +elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' +fi + +# perform some very rudimentary platform detection +lsb_dist='' +if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" +fi +if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" +fi +if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='Debian' +fi +if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='Fedora' +fi + +case "$lsb_dist" in + Fedora) + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-io' + ) + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker run --rm hello-world' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + echo + echo 'If you would like to use Docker as a non-root user, you should now consider' + echo 'adding your user to the "docker" group with something like:' + echo + echo ' sudo usermod -aG docker' $your_user + echo + echo 'Remember that you will have to log out and back in for this to take effect!' 
+ echo + exit 0 + ;; + + Ubuntu|Debian|LinuxMint) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + kern_extras="linux-image-extra-$(uname -r)" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + fi + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser &> /dev/null; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser missing' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! -e /usr/lib/apt/methods/https ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https' ) + fi + if [ -z "$curl" ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl' ) + curl='curl -sSL' + fi + ( + set -x + if [ "https://get.docker.com/" = "$url" ]; then + $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" + elif [ "https://test.docker.com/" = "$url" ]; then + $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" + else + $sh_c "$curl ${url}gpg | apt-key add -" + fi + $sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list" + $sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker' + ) + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker run --rm hello-world' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + echo + echo 'If you would like to use Docker as a non-root user, you should now consider' + echo 'adding your user to the "docker" group with something like:' + echo + echo ' sudo usermod -aG docker' $your_user + echo + echo 'Remember that you will have to log out and back in for this to take effect!' + echo + exit 0 + ;; + + Gentoo) + if [ "$url" = "https://test.docker.com/" ]; then + echo >&2 + echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.' + echo >&2 ' The portage tree should contain the latest stable release of Docker, but' + echo >&2 ' if you want something more recent, you can always use the live ebuild' + echo >&2 ' provided in the "docker" overlay available via layman. 
For more'
+		echo >&2 '  instructions, please see the following URL:'
+		echo >&2 '    https://github.com/tianon/docker-overlay#using-this-overlay'
+		echo >&2 '  After adding the "docker" overlay, you should be able to:'
+		echo >&2 '    emerge -av =app-emulation/docker-9999'
+		echo >&2
+		exit 1
+	fi
+
+	(
+		set -x
+		$sh_c 'sleep 3; emerge app-emulation/docker'
+	)
+	exit 0
+	;;
+esac
+
+cat >&2 <<'EOF'
+
+  Either your platform is not easily detectable, is not supported by this
+  installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
+  a package for Docker. Please visit the following URL for more detailed
+  installation instructions:
+
+    https://docs.docker.com/en/latest/installation/
+
+EOF
+exit 1
diff --git a/hack/make.sh b/hack/make.sh
new file mode 100755
index 00000000..d6da3057
--- /dev/null
+++ b/hack/make.sh
@@ -0,0 +1,242 @@
+#!/usr/bin/env bash
+set -e
+
+# This script builds various binary artifacts from a checkout of the docker
+# source code.
+#
+# Requirements:
+# - The current directory should be a checkout of the docker source code
+#   (http://github.com/docker/docker). Whatever version is checked out
+#   will be built.
+# - The VERSION file, at the root of the repository, should exist, and
+#   will be used as Docker binary version and package version.
+# - The hash of the git commit will also be included in the Docker binary,
+#   with the suffix -dirty if the repository isn't clean.
+# - The script is intended to be run inside the docker container specified
+#   in the Dockerfile at the root of the source. In other words:
+#   DO NOT CALL THIS SCRIPT DIRECTLY.
+# - The right way to call this script is to invoke "make" from
+#   your checkout of the Docker repository.
+#   the Makefile will do a "docker build -t docker ." and then
+#   "docker run hack/make.sh" in the resulting image.
+#
+
+set -o pipefail
+
+export DOCKER_PKG='github.com/docker/docker'
+
+# We're a nice, sexy, little shell script, and people might try to run us;
+# but really, they shouldn't. We want to be in a container!
+if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
+	{
+		echo "# WARNING! I don't seem to be running in the Docker container."
+		echo "# The result of this command might be an incorrect build, and will not be"
+		echo "# officially supported."
+		echo "#"
+		echo "# Try this instead: make all"
+		echo "#"
+	} >&2
+fi
+
+echo
+
+# List of bundles to create when no argument is passed
+DEFAULT_BUNDLES=(
+	validate-dco
+	validate-gofmt
+
+	binary
+
+	test-unit
+	test-integration
+	test-integration-cli
+
+	dynbinary
+	dyntest-unit
+	dyntest-integration
+
+	cover
+	cross
+	tgz
+	ubuntu
+)
+
+VERSION=$(cat ./VERSION)
+if command -v git &> /dev/null && git rev-parse &> /dev/null; then
+	GITCOMMIT=$(git rev-parse --short HEAD)
+	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
+		GITCOMMIT="$GITCOMMIT-dirty"
+	fi
+elif [ "$DOCKER_GITCOMMIT" ]; then
+	GITCOMMIT="$DOCKER_GITCOMMIT"
+else
+	echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified'
+	echo >&2 '  Please either build with the .git directory accessible, or specify the'
+	echo >&2 '  exact (--short) commit hash you are building using DOCKER_GITCOMMIT for'
+	echo >&2 '  future accountability in diagnosing build issues. Thanks!'
+	exit 1
+fi
+
+if [ "$AUTO_GOPATH" ]; then
+	rm -rf .gopath
+	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
+	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
+	export GOPATH="$(pwd)/.gopath:$(pwd)/vendor"
+fi
+
+if [ ! 
"$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +if [ -z "$DOCKER_CLIENTONLY" ]; then + DOCKER_BUILDTAGS+=" daemon" +fi + +# Use these flags when compiling the tests and final binary +LDFLAGS=' + -w + -X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'" + -X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'" +' +LDFLAGS_STATIC='-linkmode external' +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" ) +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. +: ${TIMEOUT:=30m} +TESTFLAGS+=" -test.timeout=${TIMEOUT}" + +# A few more flags that are specific just to building a completely-static binary (see hack/make/binary) +# PLEASE do not use these anywhere else. +EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -X $DOCKER_PKG/dockerversion.IAMSTATIC true + -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +# If sqlite3.h doesn't exist under /usr/include, +# check /usr/local/include also just in case +# (e.g. FreeBSD Ports installs it under the directory) +if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then + export CGO_CFLAGS='-I/usr/local/include' + export CGO_LDFLAGS='-L/usr/local/lib' +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, eg. +# +# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test +# +go_test_dir() { + dir=$1 + coverpkg=$2 + testcover=() + if [ "$HAVE_GO_TEST_COVER" ]; then + # if our current go install has -cover, we want to use it :) + mkdir -p "$DEST/coverprofiles" + coverprofile="docker${dir#.}" + coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" + testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) + fi + ( + export DEST + echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" + cd "$dir" + go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS + ) +} + +# This helper function walks the current directory looking for directories +# holding certain files ($1 parameter), and prints their paths on standard +# output, one per line. +find_dirs() { + find . 
-not \( \ + \( \ + -wholename './vendor' \ + -o -wholename './integration' \ + -o -wholename './integration-cli' \ + -o -wholename './contrib' \ + -o -wholename './pkg/mflag/example' \ + -o -wholename './.git' \ + -o -wholename './bundles' \ + -o -wholename './docs' \ + -o -wholename './pkg/libcontainer/nsinit' \ + \) \ + -prune \ + \) -name "$1" -print0 | xargs -0n1 dirname | sort -u +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + bundlescript=$1 + bundle=$(basename $bundlescript) + echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" + mkdir -p bundles/$VERSION/$bundle + source $bundlescript $(pwd)/bundles/$VERSION/$bundle +} + +main() { + # We want this to fail if the bundles already exist and cannot be removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1 + echo + fi + SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + bundle $SCRIPTDIR/make/$bundle + echo + done +} + +main "$@" diff --git a/hack/make/.ensure-busybox b/hack/make/.ensure-busybox new file mode 100644 index 00000000..3861faaf --- /dev/null +++ b/hack/make/.ensure-busybox @@ -0,0 +1,10 @@ +#!/bin/bash + +if ! docker inspect busybox &> /dev/null; then + if [ -d /docker-busybox ]; then + source "$(dirname "$BASH_SOURCE")/.ensure-scratch" + ( set -x; docker build -t busybox /docker-busybox ) + else + ( set -x; docker pull busybox ) + fi +fi diff --git a/hack/make/.ensure-scratch b/hack/make/.ensure-scratch new file mode 100644 index 00000000..9a9a43a0 --- /dev/null +++ b/hack/make/.ensure-scratch @@ -0,0 +1,21 @@ +#!/bin/bash + +if ! docker inspect scratch &> /dev/null; then + # let's build a "docker save" tarball for "scratch" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + mkdir -p /docker-scratch + ( + cd /docker-scratch + echo '{"scratch":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cf /docker-scratch.tar -C /docker-scratch . 
)
+	( set -x; docker load --input /docker-scratch.tar )
+fi
diff --git a/hack/make/.go-compile-test-dir b/hack/make/.go-compile-test-dir
new file mode 100755
index 00000000..0905f7d4
--- /dev/null
+++ b/hack/make/.go-compile-test-dir
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -e
+
+# Compile phase run by parallel in test-unit. No support for coverpkg
+
+dir=$1
+out_file="$DEST/precompiled/$dir.test"
+testcover=()
+if [ "$HAVE_GO_TEST_COVER" ]; then
+	# if our current go install has -cover, we want to use it :)
+	mkdir -p "$DEST/coverprofiles"
+	coverprofile="docker${dir#.}"
+	coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
+	testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg
+fi
+if [ "$BUILDFLAGS_FILE" ]; then
+	readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE"
+fi
+(
+	cd "$dir"
+	go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c
+)
+[ $? -ne 0 ] && return 1
+mkdir -p "$(dirname "$out_file")"
+mv "$dir/$(basename "$dir").test" "$out_file"
+echo "Precompiled: ${DOCKER_PKG}${dir#.}"
diff --git a/hack/make/.validate b/hack/make/.validate
new file mode 100644
index 00000000..02280915
--- /dev/null
+++ b/hack/make/.validate
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+if [ -z "$VALIDATE_UPSTREAM" ]; then
+	# this is kind of an expensive check, so let's not do this twice if we
+	# are running more than one validate bundlescript
+
+	VALIDATE_REPO='https://github.com/docker/docker.git'
+	VALIDATE_BRANCH='master'
+
+	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
+		VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
+		VALIDATE_BRANCH="${TRAVIS_BRANCH}"
+	fi
+
+	VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
+
+	git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
+	VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
+
+	VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
+	VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
+
+	validate_diff() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git diff "$VALIDATE_COMMIT_DIFF" "$@"
+		fi
+	}
+	validate_log() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git log "$VALIDATE_COMMIT_LOG" "$@"
+		fi
+	}
+fi
diff --git a/hack/make/README.md b/hack/make/README.md
new file mode 100644
index 00000000..6574b0ef
--- /dev/null
+++ b/hack/make/README.md
@@ -0,0 +1,17 @@
+This directory holds scripts called by `make.sh` in the parent directory.
+
+Each script is named after the bundle it creates.
+They should not be called directly - instead, pass the bundle name as an
+argument to make.sh, for example:
+
+```
+./hack/make.sh test
+./hack/make.sh binary ubuntu
+
+# Or to run all bundles:
+./hack/make.sh
+```
+
+To add a bundle (see the sketch below):
+
+* Create a shell-compatible file here
+* Add it to $DEFAULT_BUNDLES in make.sh
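+
+For illustration, a minimal bundle script could look like the following
+(a hypothetical "hello" bundle, not part of this tree; every bundle script
+is sourced by make.sh with its output directory as the first argument):
+
+```
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# write a trivial artifact into this bundle's output directory
+echo 'hello' > "$DEST/hello.txt"
+echo "Created: $DEST/hello.txt"
+```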
diff --git a/hack/make/binary b/hack/make/binary
new file mode 100755
index 00000000..b97069a8
--- /dev/null
+++ b/hack/make/binary
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+go build \
+	-o "$DEST/docker-$VERSION" \
+	"${BUILDFLAGS[@]}" \
+	-ldflags "
+		$LDFLAGS
+		$LDFLAGS_STATIC_DOCKER
+	" \
+	./docker
+echo "Created binary: $DEST/docker-$VERSION"
+ln -sf "docker-$VERSION" "$DEST/docker"
+
+hash_files "$DEST/docker-$VERSION"
diff --git a/hack/make/cover b/hack/make/cover
new file mode 100644
index 00000000..ca772d03
--- /dev/null
+++ b/hack/make/cover
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+DEST="$1"
+
+bundle_cover() {
+	coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
+	for p in "${coverprofiles[@]}"; do
+		echo
+		(
+			set -x
+			go tool cover -func="$p"
+		)
+	done
+}
+
+if [ "$HAVE_GO_TEST_COVER" ]; then
+	bundle_cover 2>&1 | tee "$DEST/report.log"
+else
+	echo >&2 'warning: the current version of go does not support -cover'
+	echo >&2 '  skipping test coverage report'
+fi
diff --git a/hack/make/cross b/hack/make/cross
new file mode 100644
index 00000000..3c5cb040
--- /dev/null
+++ b/hack/make/cross
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# explicit list of os/arch combos that support being a daemon
+declare -A daemonSupporting
+daemonSupporting=(
+	[linux/amd64]=1
+)
+
+# if we have our linux/amd64 version compiled, let's symlink it in
+if [ -x "$DEST/../binary/docker-$VERSION" ]; then
+	mkdir -p "$DEST/linux/amd64"
+	(
+		cd "$DEST/linux/amd64"
+		ln -s ../../../binary/* ./
+	)
+	echo "Created symlinks:" "$DEST/linux/amd64/"*
+fi
+
+for platform in $DOCKER_CROSSPLATFORMS; do
+	(
+		mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
+		export GOOS=${platform%/*}
+		export GOARCH=${platform##*/}
+		if [ -z "${daemonSupporting[$platform]}" ]; then
+			export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
+			export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
+		fi
+		source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform"
+	)
+done
diff --git a/hack/make/dynbinary b/hack/make/dynbinary
new file mode 100644
index 00000000..5064a799
--- /dev/null
+++ b/hack/make/dynbinary
@@ -0,0 +1,45 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+if [ -z "$DOCKER_CLIENTONLY" ]; then
+	# dockerinit still needs to be a static binary, even if docker is dynamic
+	go build \
+		-o "$DEST/dockerinit-$VERSION" \
+		"${BUILDFLAGS[@]}" \
+		-ldflags "
+			$LDFLAGS
+			$LDFLAGS_STATIC
+			-extldflags \"$EXTLDFLAGS_STATIC\"
+		" \
+		./dockerinit
+	echo "Created binary: $DEST/dockerinit-$VERSION"
+	ln -sf "dockerinit-$VERSION" "$DEST/dockerinit"
+
+	hash_files "$DEST/dockerinit-$VERSION"
+
+	sha1sum=
+	if command -v sha1sum &> /dev/null; then
+		sha1sum=sha1sum
+	elif command -v shasum &> /dev/null; then
+		# Mac OS X - why couldn't they just use the same command name and be happy? 
+ sha1sum=shasum + else + echo >&2 'error: cannot find sha1sum command or equivalent' + exit 1 + fi + + # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another + export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" +else + # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) + export DOCKER_INITSHA1="" +fi +# exported so that "dyntest" can easily access it later without recalculating it + +( + export LDFLAGS_STATIC_DOCKER="-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X $DOCKER_PKG/dockerversion.INITPATH \"$DOCKER_INITPATH\"" + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + source "$(dirname "$BASH_SOURCE")/binary" +) diff --git a/hack/make/dyntest-integration b/hack/make/dyntest-integration new file mode 100644 index 00000000..1cc7349a --- /dev/null +++ b/hack/make/dyntest-integration @@ -0,0 +1,18 @@ +#!/bin/bash +set -e + +DEST=$1 +INIT=$DEST/../dynbinary/dockerinit-$VERSION + +if [ ! -x "$INIT" ]; then + echo >&2 'error: dynbinary must be run before dyntest-integration' + false +fi + +( + export TEST_DOCKERINIT_PATH="$INIT" + export LDFLAGS_STATIC_DOCKER=" + -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" + " + source "$(dirname "$BASH_SOURCE")/test-integration" +) diff --git a/hack/make/dyntest-unit b/hack/make/dyntest-unit new file mode 100644 index 00000000..cffef985 --- /dev/null +++ b/hack/make/dyntest-unit @@ -0,0 +1,18 @@ +#!/bin/bash +set -e + +DEST=$1 +INIT=$DEST/../dynbinary/dockerinit-$VERSION + +if [ ! -x "$INIT" ]; then + echo >&2 'error: dynbinary must be run before dyntest-unit' + false +fi + +( + export TEST_DOCKERINIT_PATH="$INIT" + export LDFLAGS_STATIC_DOCKER=" + -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" + " + source "$(dirname "$BASH_SOURCE")/test-unit" +) diff --git a/hack/make/test-integration b/hack/make/test-integration new file mode 100644 index 00000000..b49ae595 --- /dev/null +++ b/hack/make/test-integration @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +DEST=$1 + +bundle_test_integration() { + LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir ./integration \ + "-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)" +} + +# this "grep" hides some really irritating warnings that "go test -coverpkg" +# spews when it is given packages that aren't used +exec > >(tee -a $DEST/test.log) 2>&1 +bundle_test_integration 2>&1 \ + | grep --line-buffered -v '^warning: no packages being tested depend on ' diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli new file mode 100644 index 00000000..e371fac0 --- /dev/null +++ b/hack/make/test-integration-cli @@ -0,0 +1,46 @@ +#!/bin/bash +set -e + +DEST=$1 + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} + +bundle_test_integration_cli() { + go_test_dir ./integration-cli +} + +# subshell so that we can export PATH without breaking other things +exec > >(tee -a $DEST/test.log) 2>&1 +( + export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" + + if ! 
command -v docker &> /dev/null; then
+		echo >&2 'error: binary or dynbinary must be run before test-integration-cli'
+		false
+	fi
+
+	# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers
+	exec 41>&1 42>&2
+
+	( set -x; exec \
+		docker --daemon --debug \
+		--storage-driver "$DOCKER_GRAPHDRIVER" \
+		--exec-driver "$DOCKER_EXECDRIVER" \
+		--pidfile "$DEST/docker.pid" \
+		&> "$DEST/docker.log"
+	) &
+
+	# give the daemon a moment to start, then pull the busybox image before running the tests
+	sleep 2
+
+	source "$(dirname "$BASH_SOURCE")/.ensure-busybox"
+
+	bundle_test_integration_cli
+
+	for pid in $(find "$DEST" -name docker.pid); do
+		DOCKER_PID=$(set -x; cat "$pid")
+		( set -x; kill $DOCKER_PID )
+		wait $DOCKER_PID || true
+	done
+)
diff --git a/hack/make/test-unit b/hack/make/test-unit
new file mode 100644
index 00000000..5040e37d
--- /dev/null
+++ b/hack/make/test-unit
@@ -0,0 +1,84 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+: ${PARALLEL_JOBS:=$(nproc)}
+
+RED=$'\033[31m'
+GREEN=$'\033[32m'
+TEXTRESET=$'\033[0m' # reset the foreground colour
+
+# Run Docker's test suite, including sub-packages, and store their output as a bundle
+# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
+# You can use this to select certain tests to run, eg.
+#
+#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit
+#
+bundle_test_unit() {
+	{
+		date
+
+		# Run all the tests if no TESTDIRS were specified.
+		if [ -z "$TESTDIRS" ]; then
+			TESTDIRS=$(find_dirs '*_test.go')
+		fi
+
+		if command -v parallel &> /dev/null; then (
+			# accommodate parallel to be able to access variables
+			export SHELL="$BASH"
+			export HOME="$(mktemp -d)"
+			mkdir -p "$HOME/.parallel"
+			touch "$HOME/.parallel/ignored_vars"
+			export LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER"
+			export TESTFLAGS
+			export HAVE_GO_TEST_COVER
+			export DEST
+			# some hack to export array variables
+			export BUILDFLAGS_FILE="$HOME/buildflags_file"
+			( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE"
+
+			echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir"
+			rm -rf "$HOME"
+		) else
+			# aww, no "parallel" available - fall back to boring
+			for test_dir in $TESTDIRS; do
+				"$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir"
+			done
+		fi
+		echo "$TESTDIRS" | go_run_test_dir
+	}
+}
+
+go_run_test_dir() {
+	TESTS_FAILED=()
+	while read dir; do
+		echo
+		echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
+		precompiled="$DEST/precompiled/$dir.test"
+		if ! ( cd "$dir" && "$precompiled" $TESTFLAGS ); then
+			TESTS_FAILED+=("$dir")
+			echo
+			echo "${RED}Tests failed: $dir${TEXTRESET}"
+			sleep 1 # give it a second, so observers watching can take note
+		fi
+	done
+
+	echo
+	echo
+	echo
+
+	# if some tests fail, we want the bundlescript to fail, but we want to
+	# try running ALL the tests first, hence TESTS_FAILED
+	if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
+		echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
+		echo
+		false
+	else
+		echo "${GREEN}Test success${TEXTRESET}"
+		echo
+		true
+	fi
+}
+
+exec > >(tee -a $DEST/test.log) 2>&1
+bundle_test_unit
diff --git a/hack/make/tgz b/hack/make/tgz
new file mode 100644
index 00000000..12033997
--- /dev/null
+++ b/hack/make/tgz
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+DEST="$1"
+CROSS="$DEST/../cross"
+
+set -e
+
+if [ ! 
-d "$CROSS/linux/amd64" ]; then + echo >&2 'error: binary and cross must be run before tgz' + false +fi + +for d in "$CROSS/"*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + mkdir -p "$DEST/$GOOS/$GOARCH" + TGZ="$DEST/$GOOS/$GOARCH/docker-$VERSION.tgz" + + mkdir -p "$DEST/build" + + mkdir -p "$DEST/build/usr/local/bin" + cp -L "$d/docker-$VERSION" "$DEST/build/usr/local/bin/docker" + + tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr + + hash_files "$TGZ" + + rm -rf "$DEST/build" + + echo "Created tgz: $TGZ" +done diff --git a/hack/make/ubuntu b/hack/make/ubuntu new file mode 100644 index 00000000..98ec4230 --- /dev/null +++ b/hack/make/ubuntu @@ -0,0 +1,176 @@ +#!/bin/bash + +DEST=$1 + +PKGVERSION="$VERSION" +if [ -n "$(git status --porcelain)" ]; then + PKGVERSION="$PKGVERSION-$(date +%Y%m%d%H%M%S)-$GITCOMMIT" +fi + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="http://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. +bundle_ubuntu() { + DIR=$DEST/build + + # Include our udev rules + mkdir -p $DIR/etc/udev/rules.d + cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ + + # Include our init scripts + mkdir -p $DIR/etc/init + cp contrib/init/upstart/docker.conf $DIR/etc/init/ + mkdir -p $DIR/etc/init.d + cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ + mkdir -p $DIR/etc/default + cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker + mkdir -p $DIR/lib/systemd/system + cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/ + + # Include contributed completions + mkdir -p $DIR/etc/bash_completion.d + cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ + mkdir -p $DIR/usr/share/zsh/vendor-completions + cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/ + mkdir -p $DIR/etc/fish/completions + cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ + + # Include contributed man pages + docs/man/md2man-all.sh -q + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in docs/man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p $DIR/usr/bin + cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker + + # Generate postinst/prerm/postrm scripts + cat > $DEST/postinst <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! 
{ [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > $DEST/prerm <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > $DEST/postrm <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. +if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x $DEST/postinst $DEST/prerm $DEST/postrm + + ( + # switch directories so we create *.deb in the right folder + cd $DEST + + # create lxc-docker-VERSION package + fpm -s dir -C $DIR \ + --name lxc-docker-$VERSION --version $PKGVERSION \ + --after-install $DEST/postinst \ + --before-remove $DEST/prerm \ + --after-remove $DEST/postrm \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . 
+ # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version $PKGVERSION \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm $DEST/postinst $DEST/prerm $DEST/postrm + rm -r $DIR +} + +bundle_ubuntu diff --git a/hack/make/validate-dco b/hack/make/validate-dco new file mode 100644 index 00000000..1c75d91b --- /dev/null +++ b/hack/make/validate-dco @@ -0,0 +1,56 @@ +#!/bin/bash + +source "$(dirname "$BASH_SOURCE")/.validate" + +adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') +dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') +notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" + +: ${adds:=0} +: ${dels:=0} + +# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" +githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + +# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work +dcoPrefix='Signed-off-by:' +dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" + +check_dco() { + grep -qE "$dcoRegex" +} + +if [ $adds -eq 0 -a $dels -eq 0 ]; then + echo '0 adds, 0 deletions; nothing to validate! :)' +elif [ -z "$notDocs" -a $adds -le 1 -a $dels -le 1 ]; then + echo 'Congratulations! DCO small-patch-exception material!' +else + commits=( $(validate_log --format='format:%H%n') ) + badCommits=() + for commit in "${commits[@]}"; do + if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then + # no content (ie, Merge commit, etc) + continue + fi + if ! git log -1 --format='format:%B' "$commit" | check_dco; then + badCommits+=( "$commit" ) + fi + done + if [ ${#badCommits[@]} -eq 0 ]; then + echo "Congratulations! All commits are properly signed with the DCO!" + else + { + echo "These commits do not have a proper '$dcoPrefix' marker:" + for commit in "${badCommits[@]}"; do + echo " - $commit" + done + echo + echo 'Please amend each commit to include a properly formatted DCO marker.' + echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff --git a/hack/make/validate-gofmt b/hack/make/validate-gofmt new file mode 100644 index 00000000..8fc88cc5 --- /dev/null +++ b/hack/make/validate-gofmt @@ -0,0 +1,30 @@ +#!/bin/bash + +source "$(dirname "$BASH_SOURCE")/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/release.sh b/hack/release.sh
new file mode 100755
index 00000000..1174e0cc
--- /dev/null
+++ b/hack/release.sh
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+set -e
+
+# This script looks for bundles built by make.sh, and releases them on a
+# public S3 bucket.
+#
+# Bundles should be available for the VERSION string passed as argument.
+#
+# The correct way to call this script is inside a container built by the
+# official Dockerfile at the root of the Docker source code. The Dockerfile,
+# make.sh and release.sh should all be from the same source code revision.
+
+set -o pipefail
+
+# Print a usage message and exit.
+usage() {
+	cat >&2 <<'EOF'
+To run, I need:
+- to be in a container generated by the Dockerfile at the top of the Docker
+  repository;
+- to be provided with the name of an S3 bucket, in environment variable
+  AWS_S3_BUCKET;
+- to be provided with AWS credentials for this S3 bucket, in environment
+  variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
+- the passphrase to unlock the GPG key which will sign the deb packages
+  (passed as environment variable GPG_PASSPHRASE);
+- a generous amount of good will and nice manners.
+The canonical way to run me is to run the image produced by the Dockerfile, e.g.:
+
+docker run -e AWS_S3_BUCKET=test.docker.com \
+	-e AWS_ACCESS_KEY=... \
+	-e AWS_SECRET_KEY=... \
+	-e GPG_PASSPHRASE=... \
+	-i -t --privileged \
+	docker ./hack/release.sh
+EOF
+	exit 1
+}
+
+[ "$AWS_S3_BUCKET" ] || usage
+[ "$AWS_ACCESS_KEY" ] || usage
+[ "$AWS_SECRET_KEY" ] || usage
+[ "$GPG_PASSPHRASE" ] || usage
+[ -d /go/src/github.com/docker/docker ] || usage
+cd /go/src/github.com/docker/docker
+[ -x hack/make.sh ] || usage
+
+RELEASE_BUNDLES=(
+	binary
+	cross
+	tgz
+	ubuntu
+)
+
+if [ "$1" != '--release-regardless-of-test-failure' ]; then
+	RELEASE_BUNDLES=(
+		test-unit test-integration
+		"${RELEASE_BUNDLES[@]}"
+		test-integration-cli
+	)
+fi
+
+VERSION=$(cat VERSION)
+BUCKET=$AWS_S3_BUCKET
+
+# These are the 2 keys we've used to sign the debs
+# release (get.docker.com)
+# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
+# test (test.docker.com)
+# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
+
+setup_s3() {
+	# Try creating the bucket. Ignore errors (it might already exist).
+	s3cmd mb s3://$BUCKET 2>/dev/null || true
+	# Check access to the bucket.
+	# s3cmd has no useful exit status, so we cannot check that.
+	# Instead, we check if it outputs anything on standard output.
+	# (When there are problems, it uses standard error instead.)
+	s3cmd info s3://$BUCKET | grep -q .
+	# Make the bucket accessible through website endpoints.
+	s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
+}
+
+# write_to_s3 uploads the contents of standard input to the specified S3 url.
+write_to_s3() {
+	DEST=$1
+	F=`mktemp`
+	cat > $F
+	s3cmd --acl-public --mime-type='text/plain' put $F $DEST
+	rm -f $F
+}
+
+s3_url() {
+	case "$BUCKET" in
+		get.docker.com|test.docker.com)
+			echo "https://$BUCKET"
+			;;
+		*)
+			s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
+			;;
+	esac
+}
+
+build_all() {
+	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
+		echo >&2
+		echo >&2 'The build or tests appear to have failed.'
+		echo >&2
+		echo >&2 'You, as the release maintainer, now have a couple options:'
+		echo >&2 '- delay release and fix issues'
+		echo >&2 '- delay release and fix issues'
+		echo >&2 '- did we mention how important this is? issues need fixing :)'
+		echo >&2
+		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
+		echo >&2 '  really knows all the hairy problems at hand with the current release'
+		echo >&2 '  issues) may bypass this checking by running this script again with the'
+		echo >&2 '  single argument of "--release-regardless-of-test-failure", which will skip'
+		echo >&2 '  running the test suite, and will only build the binaries and packages. Please'
+		echo >&2 '  avoid using this if at all possible.'
+		echo >&2
+		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
+		echo >&2 '  should be used. If there are release issues, we should always err on the'
+		echo >&2 '  side of caution.'
+		echo >&2
+		exit 1
+	fi
+}
+
+upload_release_build() {
+	src="$1"
+	dst="$2"
+	latest="$3"
+
+	echo
+	echo "Uploading $src"
+	echo "  to $dst"
+	echo
+	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
+	if [ "$latest" ]; then
+		echo
+		echo "Copying to $latest"
+		echo
+		s3cmd --acl-public cp "$dst" "$latest"
+	fi
+
+	# get hash files too (see hash_files() in hack/make.sh)
+	for hashAlgo in md5 sha256; do
+		if [ -e "$src.$hashAlgo" ]; then
+			echo
+			echo "Uploading $src.$hashAlgo"
+			echo "  to $dst.$hashAlgo"
+			echo
+			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
+			if [ "$latest" ]; then
+				echo
+				echo "Copying to $latest.$hashAlgo"
+				echo
+				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
+			fi
+		fi
+	done
+}
+
+release_build() {
+	GOOS=$1
+	GOARCH=$2
+
+	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
+	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
+	binary=docker-$VERSION
+	tgz=docker-$VERSION.tgz
+
+	latestBase=
+	if [ -z "$NOLATEST" ]; then
+		latestBase=docker-latest
+	fi
+
+	# we need to map our GOOS and GOARCH to uname values
+	# see https://en.wikipedia.org/wiki/Uname
+	# ie, GOOS=linux -> "uname -s"=Linux
+
+	s3Os=$GOOS
+	case "$s3Os" in
+		darwin)
+			s3Os=Darwin
+			;;
+		freebsd)
+			s3Os=FreeBSD
+			;;
+		linux)
+			s3Os=Linux
+			;;
+		*)
+			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
+			exit 1
+			;;
+	esac
+
+	s3Arch=$GOARCH
+	case "$s3Arch" in
+		amd64)
+			s3Arch=x86_64
+			;;
+		386)
+			s3Arch=i386
+			;;
+		arm)
+			s3Arch=armel
+			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
+			;;
+		*)
+			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
+			exit 1
+			;;
+	esac
+
+	s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
+	latest=
+	latestTgz=
+	if [ "$latestBase" ]; then
+		latest="$s3Dir/$latestBase"
+		latestTgz="$s3Dir/$latestBase.tgz"
+	fi
+
+	if [ ! -x "$binDir/$binary" ]; then
+		echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
+		exit 1
+	fi
+	if [ ! -f "$tgzDir/$tgz" ]; then
+		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
+		exit 1
+	fi
+
+	upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
+	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
+}
+
+# Upload the 'ubuntu' bundle to S3:
+# 1. A full APT repository is published at $BUCKET/ubuntu/
+# 2. 
Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index +release_ubuntu() { + [ -e bundles/$VERSION/ubuntu ] || { + echo >&2 './hack/make.sh must be run before release_ubuntu' + exit 1 + } + + # Sign our packages + dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ + --sign builder bundles/$VERSION/ubuntu/*.deb + + # Setup the APT repo + APTDIR=bundles/$VERSION/ubuntu/apt + mkdir -p $APTDIR/conf $APTDIR/db + s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true + cat > $APTDIR/conf/distributions < bundles/$VERSION/ubuntu/gpg + s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg + + local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 + if [[ $BUCKET == test* ]]; then + gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6 + fi + + # Upload repo + s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ + cat < /etc/apt/sources.list.d/docker.list + +# Then import the repository key +apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys $gpgFingerprint + +# Install docker +apt-get update +apt-get install -y lxc-docker + +# +# Alternatively, just use the curl-able install.sh script provided at $(s3_url) +# +EOF + + # Add redirect at /ubuntu/info for URL-backwards-compatibility + rm -rf /tmp/emptyfile && touch /tmp/emptyfile + s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info + + echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu" +} + +# Upload binaries and tgz files to S3 +release_binaries() { + [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || { + echo >&2 './hack/make.sh must be run before release_binaries' + exit 1 + } + + for d in bundles/$VERSION/cross/*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + release_build "$GOOS" "$GOARCH" + done + + # TODO create redirect from builds/*/i686 to builds/*/i386 + + cat </dev/null || { + gpg --gen-key --batch <= MaxImageDepth { + return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) + } + return nil +} + +// Build an Image object from raw json data +func NewImgJSON(src []byte) (*Image, error) { + ret := &Image{} + + log.Debugf("Json string: {%s}", src) + // FIXME: Is there a cleaner way to "purify" the input json? 
+ if err := json.Unmarshal(src, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/integration-cli/MAINTAINERS b/integration-cli/MAINTAINERS new file mode 100644 index 00000000..6dde4769 --- /dev/null +++ b/integration-cli/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile new file mode 100644 index 00000000..d63e8538 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile new file mode 100644 index 00000000..45df77e5 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile @@ -0,0 +1,8 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile b/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile new file mode 100644 index 00000000..e6bc0c0d --- /dev/null +++ b/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +COPY https://index.docker.io/robots.txt / diff --git a/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile new file mode 100644 index 00000000..b4f319f8 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile @@ -0,0 +1,2 @@ +FROM scratch +COPY . 
/ diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile b/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile new file mode 100644 index 00000000..4143e659 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile @@ -0,0 +1,17 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 https://docker.com/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile new file mode 100644 index 00000000..520d356c --- /dev/null +++ b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile @@ -0,0 +1,7 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN chown -R dockerio.dockerio /exists +COPY test_file1 /exists/ +ADD test_file2 test_file3 /exists/test_file1 diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3 new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile new file mode 100644 index 00000000..3edfe661 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY 
test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile new file mode 100644 index 00000000..33b65a62 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file new file mode 100644 index 00000000..e69de29b diff --git a/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile new file mode 100644 index 00000000..38fd0902 --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile new file mode 100644 index 00000000..ba2d797e --- /dev/null +++ b/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox +COPY test_file . 
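Every TestCopy fixture above asserts ownership from inside the image with `RUN [ $(ls -l <path> | awk '{print $3":"$4}') = 'user:group' ]`. For reference, here is a minimal host-side sketch of the same owner:group lookup in Go. It is illustrative only and not part of this patch: it is Linux-specific (it relies on the syscall.Stat_t payload) and assumes a Go standard library new enough to provide os/user.LookupGroupId.

package main

import (
	"fmt"
	"os"
	"os/user"
	"strconv"
	"syscall"
)

// ownerGroup returns the "user:group" pair for path, e.g. "root:root",
// mirroring what the fixtures compute inside the container with
// `ls -l | awk '{print $3":"$4}'`.
func ownerGroup(path string) (string, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	st, ok := fi.Sys().(*syscall.Stat_t) // Linux-specific stat payload
	if !ok {
		return "", fmt.Errorf("no syscall.Stat_t available for %s", path)
	}
	// Resolve the numeric uid/gid to names so the comparison matches ls output.
	u, err := user.LookupId(strconv.FormatUint(uint64(st.Uid), 10))
	if err != nil {
		return "", err
	}
	g, err := user.LookupGroupId(strconv.FormatUint(uint64(st.Gid), 10))
	if err != nil {
		return "", err
	}
	return u.Username + ":" + g.Name, nil
}

func main() {
	og, err := ownerGroup("/etc/passwd")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(og) // typically prints "root:root"
}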
diff --git a/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile new file mode 100644 index 00000000..91be29fe --- /dev/null +++ b/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile @@ -0,0 +1,11 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go new file mode 100644 index 00000000..42258d7a --- /dev/null +++ b/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,58 @@ +package main + +import ( + "encoding/json" + "fmt" + "os/exec" + "testing" +) + +func TestInspectApiContainerResponse(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + // test on json marshal version + // and latest version + testVersions := []string{"v1.11", "latest"} + + for _, testVersion := range testVersions { + endpoint := "/containers/" + cleanedContainerID + "/json" + if testVersion != "latest" { + endpoint = "/" + testVersion + endpoint + } + body, err := sockRequest("GET", endpoint) + if err != nil { + t.Fatalf("sockRequest failed for %s version: %v", testVersion, err) + } + + var inspectJSON map[string]interface{} + if err = json.Unmarshal(body, &inspectJSON); err != nil { + t.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err) + } + + keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"} + + if testVersion == "v1.11" { + keys = append(keys, "ID") + } else { + keys = append(keys, "Id") + } + + for _, key := range keys { + if _, ok := inspectJSON[key]; !ok { + t.Fatalf("%s does not exist in response for %s version", key, testVersion) + } + } + // Issue #6830: type not properly converted to JSON/back + if _, ok := inspectJSON["Path"].(bool); ok { + t.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling") + } + } + + deleteAllContainers() + + logDone("container json - check keys in container json response") +} diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go new file mode 100644 index 00000000..510f02ab --- /dev/null +++ b/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,89 @@ +package main + +import ( + "io" + "os/exec" + "strings" + "sync" + "testing" + "time" +) + +const attachWait = 5 * time.Second + +func TestAttachMultipleAndRestart(t *testing.T) { + defer deleteAllContainers() + + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + if err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done"); err != nil { + 
t.Fatal(err) + } + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + c := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + c.Wait() + endGroup.Done() + }() + + out, err := c.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if _, err := startCommand(c); err != nil { + t.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + t.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + t.Fatalf("Attaches did not initialize properly") + } + + cmd := exec.Command(dockerBinary, "kill", "attacher") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + select { + case <-endDone: + case <-time.After(attachWait): + t.Fatalf("Attaches did not finish properly") + } + + logDone("attach - multiple attach") +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go new file mode 100644 index 00000000..408b8016 --- /dev/null +++ b/integration-cli/docker_cli_build_test.go @@ -0,0 +1,3499 @@ +package main + +import ( + "archive/tar" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" +) + +func TestBuildShCmdJSONEntrypoint(t *testing.T) { + name := "testbuildshcmdjsonentrypoint" + defer deleteImages(name) + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["/bin/echo"] + CMD echo test + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput( + exec.Command( + dockerBinary, + "run", + name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + t.Fatal("CMD did not contain /bin/sh -c") + } + + logDone("build - CMD should always contain /bin/sh -c when specified without JSON") +} + +func TestBuildEnvironmentReplacementUser(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.User") + if err != nil { + t.Fatal(err) + } + + if res != `"foo"` { + t.Fatal("User foo from environment not in Config.User on image") + } + + logDone("build - user environment replacement") +} + +func TestBuildEnvironmentReplacementVolume(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV volume /quux + VOLUME ${volume} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + t.Fatal(err) + } + + if _, ok := volumes["/quux"]; !ok { + t.Fatal("Volume /quux from environment not in Config.Volumes on image") + } + + logDone("build - volume environment replacement") +} + +func TestBuildEnvironmentReplacementExpose(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + 
`, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + t.Fatal(err) + } + + if _, ok := exposedPorts["80/tcp"]; !ok { + t.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") + } + + logDone("build - expose environment replacement") +} + +func TestBuildEnvironmentReplacementWorkdir(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + t.Fatal(err) + } + + logDone("build - workdir environment replacement") +} + +func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + ctx, err := fakeContext(` + FROM scratch + ENV baz foo + ENV quux bar + ENV dot . + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + }) + + if err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + logDone("build - add/copy environment replacement") +} + +func TestBuildEnvironmentReplacementEnv(t *testing.T) { + name := "testbuildenvironmentreplacement" + + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM scratch + ENV foo foo + ENV bar ${foo} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Env") + if err != nil { + t.Fatal(err) + } + + envResult := []string{} + + if err = unmarshalJSON([]byte(res), &envResult); err != nil { + t.Fatal(err) + } + + found := false + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "foo" { + t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) + } + } + } + + if !found { + t.Fatal("Never found the `bar` env variable") + } + + logDone("build - env environment replacement") +} + +func TestBuildHandleEscapes(t *testing.T) { + name := "testbuildhandleescapes" + + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + var result map[string]map[string]struct{} + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + t.Fatal("Could not find volume bar set from env foo in volumes table") + } + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result["${FOO}"]; !ok { + t.Fatal("Could not find volume ${FOO} set from env foo in volumes table") + } + + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals; this one is just less obvious because of all the character noise.
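+	// Concretely: the lone backslash that escapes the $ is consumed, while the
+	// six backslashes ahead of it pass through verbatim, which is why the
+	// expected map key below is \\\\\\${FOO}.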
+ + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \\\\\\\${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result[`\\\\\\${FOO}`]; !ok { + t.Fatal(`Could not find volume \\\\\\${FOO} set from env foo in volumes table`) + } + + logDone("build - handle escapes") +} + +func TestBuildOnBuildLowercase(t *testing.T) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + defer deleteImages(name, name2) + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + t.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + t.Fatal(err) + } + + if !strings.Contains(out, "quux") { + t.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + t.Fatalf("Output should not contain 'ONBUILD ONBUILD': got %s", out) + } + + logDone("build - handle case-insensitive onbuild statement") +} + +func TestBuildEnvEscapes(t *testing.T) { + name := "testbuildenvescapes" + defer deleteAllContainers() + defer deleteImages(name) + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "$" { + t.Fatalf("Escaped \\$ in CMD was not printed as a literal $: got %q", strings.TrimSpace(out)) + } + + logDone("build - env should handle \\$ properly") +} + +func TestBuildEnvOverwrite(t *testing.T) { + name := "testbuildenvoverwrite" + defer deleteAllContainers() + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-e", "TEST=bar", "-t", name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "bar" { + t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + + logDone("build - env should overwrite builder ENV during run") +} + +func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { + name := "testbuildonbuildforbiddenmaintainerinsourceimage" + defer deleteImages(name) + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(createCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild") + + if _, err := runCommand(commitCmd); err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden maintainer in source image") + +} + +func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { + name := "testbuildonbuildforbiddenfrominsourceimage" + defer deleteImages(name) + createCmd := 
exec.Command(dockerBinary, "create", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(createCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild") + + if _, err := runCommand(commitCmd); err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden from in source image") + +} + +func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { + name := "testbuildonbuildforbiddenchainedinsourceimage" + defer deleteImages(name) + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(createCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild") + + if _, err := runCommand(commitCmd); err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { + t.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden chained in source image") + +} + +func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + defer deleteAllContainers() + defer deleteImages(name2) + defer deleteImages(name1) + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`, + false) + + if err != nil { + t.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) + if err != nil { + t.Fatal(err) + } + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + t.Fatal("did not get echo output from onbuild", out) + } + + logDone("build - onbuild with json entrypoint/cmd") +} + +func TestBuildOnBuildEntrypointJSON(t *testing.T) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + defer deleteAllContainers() + defer deleteImages(name2) + defer deleteImages(name1) + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`, + false) + + if err != nil { + t.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) + if err != nil { + t.Fatal(err) + } + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + t.Fatal("got malformed output from onbuild", out) + } + + logDone("build - onbuild with json entrypoint") +} + +func TestBuildCacheADD(t *testing.T) { + name := "testbuildtwoimageswithadd" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + if _, err := buildImage(name, + 
fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL), + true); err != nil { + t.Fatal(err) + } + _, out, err := buildImageWithOut(name, + fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + if strings.Contains(out, "Using cache") { + t.Fatal("2nd build used cache on ADD, it shouldn't") + } + + logDone("build - build two images with remote ADD") +} + +func TestBuildSixtySteps(t *testing.T) { + name := "foobuildsixtysteps" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60), + map[string]string{ + "foo": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - build an image with sixty build steps") +} + +func TestBuildAddSingleFileToRoot(t *testing.T) { + name := "testaddimg" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add single file to root") +} + +// Issue #3960: "ADD src ." hangs +func TestBuildAddSingleFileToWorkdir(t *testing.T) { + name := "testaddsinglefiletoworkdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +ADD test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + done := make(chan struct{}) + go func() { + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + close(done) + }() + select { + case <-time.After(5 * time.Second): + t.Fatal("Build with adding to workdir timed out") + case <-done: + } + logDone("build - add single file to workdir") +} + +func TestBuildAddSingleFileToExistDir(t *testing.T) { + name := "testaddsinglefiletoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add single file to existing dir") +} + +func TestBuildCopyAddMultipleFiles(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "MultipleFiles") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - multiple file copy/add tests") +} + +func 
TestBuildAddMultipleFilesToFile(t *testing.T) { + name := "testaddmultiplefilestofile" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD file1.txt file2.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - multiple add files to file") +} + +func TestBuildAddMultipleFilesToFileWild(t *testing.T) { + name := "testaddmultiplefilestofilewild" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD file*.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - multiple add files to file wild") +} + +func TestBuildCopyMultipleFilesToFile(t *testing.T) { + name := "testcopymultiplefilestofile" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + COPY file1.txt file2.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - multiple copy files to file") +} + +func TestBuildCopyWildcard(t *testing.T) { + name := "testcopywildcard" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN mkdir /tmp1 + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN mkdir /tmp2 + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL), + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + // Now make sure we use a cache the 2nd time + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + if id1 != id2 { + t.Fatal("didn't use the cache") + } + + logDone("build - copy wild card") +} + +func TestBuildCopyWildcardNoFind(t *testing.T) { + name := "testcopywildcardnofind" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox + COPY file*.txt /tmp/ + `, nil) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + _, err = buildImageFromContext(name, ctx, true) + if err == 
nil { + t.Fatal("should have failed to find a file") + } + if !strings.Contains(err.Error(), "No source files were specified") { + t.Fatalf("Wrong error %v, must be about no source files", err) + } + + logDone("build - copy wild card no find") +} + +func TestBuildCopyWildcardCache(t *testing.T) { + name := "testcopywildcardcache" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox + COPY file1.txt /tmp/`, + map[string]string{ + "file1.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + // Now make sure we use a cache the 2nd time even with wild cards. + // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + if id1 != id2 { + t.Fatal("didn't use the cache") + } + + logDone("build - copy wild card cache") +} + +func TestBuildAddSingleFileToNonExistDir(t *testing.T) { + name := "testaddsinglefiletononexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } +} + +func TestBuildAddDirContentToRoot(t *testing.T) { + name := "testadddircontenttoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add directory contents to root") +} + +func TestBuildAddDirContentToExistDir(t *testing.T) { + name := "testadddircontenttoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add directory contents to existing dir") +} + +func TestBuildAddWholeDirToRoot(t *testing.T) { + name := "testaddwholedirtoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 
'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add whole directory to root") +} + +// Testing #5941 +func TestBuildAddEtcToRoot(t *testing.T) { + name := "testaddetctoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch +ADD . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add etc directory to root") +} + +func TestBuildCopySingleFileToRoot(t *testing.T) { + testDirName := "SingleFileToRoot" + sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) + buildDirectory, err := ioutil.TempDir("", "test-build-add") + defer os.RemoveAll(buildDirectory) + + err = copyWithCP(sourceDirectory, buildDirectory) + if err != nil { + t.Fatalf("failed to copy files to temporary directory: %s", err) + } + + buildDirectory = filepath.Join(buildDirectory, testDirName) + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to root") +} + +// Issue #3960: "ADD src ." 
hangs - adapted for COPY +func TestBuildCopySingleFileToWorkdir(t *testing.T) { + testDirName := "SingleFileToWorkdir" + sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) + buildDirectory, err := ioutil.TempDir("", "test-build-add") + defer os.RemoveAll(buildDirectory) + + err = copyWithCP(sourceDirectory, buildDirectory) + if err != nil { + t.Fatalf("failed to copy files to temporary directory: %s", err) + } + + buildDirectory = filepath.Join(buildDirectory, testDirName) + f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + _, exitCode, err := dockerCmdInDirWithTimeout(5*time.Second, buildDirectory, "build", "-t", "testcopyimg", ".") + if err != nil || exitCode != 0 { + t.Fatalf("build failed: %s", err) + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to workdir") +} + +func TestBuildCopySingleFileToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to existing dir") +} + +func TestBuildCopySingleFileToNonExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy single file to non-existing dir") +} + +func TestBuildCopyDirContentToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy directory contents to root") +} + +func TestBuildCopyDirContentToExistDir(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy directory contents to existing dir") +} + +func TestBuildCopyWholeDirToRoot(t *testing.T) { + testDirName := "WholeDirToRoot" + sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) + buildDirectory, err := ioutil.TempDir("", "test-build-add") + defer os.RemoveAll(buildDirectory) + + err = copyWithCP(sourceDirectory, buildDirectory) + if err != nil { + t.Fatalf("failed to copy files to temporary directory: %s", err) + } + + buildDirectory = filepath.Join(buildDirectory, testDirName) + testDir := filepath.Join(buildDirectory, "test_dir") + if err := os.MkdirAll(testDir, 0755); err != nil { + t.Fatal(err) + } + f, err := 
os.OpenFile(filepath.Join(testDir, "test_file"), os.O_CREATE, 0644) + if err != nil { + t.Fatal(err) + } + f.Close() + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + + logDone("build - copy whole directory to root") +} + +func TestBuildCopyEtcToRoot(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testcopyimg") + logDone("build - copy etc directory to root") +} + +func TestBuildCopyDisallowRemote(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + + if err == nil || exitCode == 0 { + t.Fatalf("building the image should've failed; output: %s", out) + } + + deleteImages("testcopyimg") + logDone("build - copy - disallow copy from remote") +} + +func TestBuildAddBadLinks(t *testing.T) { + const ( + dockerfile = ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute" + ) + defer deleteImages(name) + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + symlinkTarget := fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + tarPath := filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + t.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + t.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + t.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + + logDone("build - ADD must add files in container") +} + +func TestBuildAddBadLinksVolume(t *testing.T) { + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + defer deleteImages(name) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = 
fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + t.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + + logDone("build - ADD should add files in volume") +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func TestBuildWithInaccessibleFilesInContext(t *testing.T) { + { + name := "testbuildinaccessiblefiles" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + err = os.Chown(pathToFileWithoutReadAccess, 0, 0) + errorOut(err, t, fmt.Sprintf("failed to chown file to root: %s", err)) + err = os.Chmod(pathToFileWithoutReadAccess, 0700) + errorOut(err, t, fmt.Sprintf("failed to chmod file to 700: %s", err)) + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err == nil || exitCode == 0 { + t.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "no permission to read from ") { + t.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context is accessible") { + t.Fatalf("output should've contained the string: Error checking context is accessible") + } + } + { + name := "testbuildinaccessibledirectory" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) + errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) + err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) + errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) + err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) + errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err == nil || exitCode == 0 { + t.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "can't stat") { + t.Fatalf("output should've contained the string: can't access %s", out) + } + + if !strings.Contains(out, "Error checking context is accessible") { + t.Fatalf("output should've contained the string: Error checking context is accessible") + } + + } + { + name := "testlinksok" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), "../../../../../../../../../../../../../../../../../../../azA"); err != nil { + t.Fatal(err) + } + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + } + { + name := "testbuildignoredinaccessible" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", + map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) + errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) + err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) + errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) + err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) + errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + t.Fatalf("build should have worked: %s %s", err, out) + } + + } + logDone("build - ADD from context with inaccessible files must fail") + logDone("build - ADD from context with accessible links must work") + logDone("build - ADD from context with ignored inaccessible files must work") +} + +func TestBuildForceRm(t *testing.T) { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + name := "testbuildforcerm" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".") + buildCmd.Dir = ctx.Dir + _, exitCode, err := runCommandWithOutput(buildCmd) + + if err == nil || exitCode == 0 { + t.Fatal("failed to build the image") + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("--force-rm shouldn't have left containers behind") + } + + logDone("build - ensure --force-rm doesn't leave containers behind") +} + +func TestBuildRm(t *testing.T) { + name := "testbuildrm" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"}) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".") + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("-rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".") + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: 
%s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + t.Fatalf("--rm=false should have left containers behind") + } + deleteAllContainers() + deleteImages(name) + + } + + logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default") + logDone("build - ensure --rm=false overrides the default") +} + +func TestBuildWithVolumes(t *testing.T) { + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + err = unmarshalJSON([]byte(res), &result) + if err != nil { + t.Fatal(err) + } + + equal := deepEqual(&expected, &result) + + if !equal { + t.Fatalf("Volumes %s, expected %s", result, expected) + } + + logDone("build - with volumes") +} + +func TestBuildMaintainer(t *testing.T) { + name := "testbuildmaintainer" + expected := "dockerio" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Maintainer %s, expected %s", res, expected) + } + logDone("build - maintainer") +} + +func TestBuildUser(t *testing.T) { + name := "testbuilduser" + expected := "dockerio" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.User") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("User %s, expected %s", res, expected) + } + logDone("build - user") +} + +func TestBuildRelativeWorkdir(t *testing.T) { + name := "testbuildrelativeworkdir" + expected := "/test2/test3" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN [ "$PWD" = '/' ] + WORKDIR test1 + RUN [ "$PWD" = '/test1' ] + WORKDIR /test2 + RUN [ "$PWD" = '/test2' ] + WORKDIR test3 + RUN [ "$PWD" = '/test2/test3' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Workdir %s, expected %s", res, expected) + } + logDone("build - relative workdir") +} + +func TestBuildWorkdirWithEnvVariables(t *testing.T) { + name := "testbuildworkdirwithenvvariables" + 
expected := "/test1/test2/$MISSING_VAR" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Workdir %s, expected %s", res, expected) + } + logDone("build - workdir with env variables") +} + +func TestBuildEnv(t *testing.T) { + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Env") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Env %s, expected %s", res, expected) + } + logDone("build - env") +} + +func TestBuildContextCleanup(t *testing.T) { + name := "testbuildcontextcleanup" + defer deleteImages(name) + entries, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + t.Fatalf("context should have been deleted, but wasn't") + } + + logDone("build - verify context cleanup works properly") +} + +func TestBuildContextCleanupFailedBuild(t *testing.T) { + name := "testbuildcontextcleanup" + defer deleteImages(name) + entries, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM scratch + RUN /non/existing/command`, + true) + if err == nil { + t.Fatalf("expected build to fail, but it didn't") + } + entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + t.Fatalf("context should have been deleted, but wasn't") + } + + logDone("build - verify context cleanup works properly after a failed build") +} + +func TestBuildCmd(t *testing.T) { + name := "testbuildcmd" + expected := "[/bin/echo Hello World]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Cmd %s, expected %s", res, expected) + } + logDone("build - cmd") +} + +func TestBuildExpose(t *testing.T) { + name := "testbuildexpose" + expected := "map[2375/tcp:map[]]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Exposed ports %s, expected %s", res, expected) + } + logDone("build - expose") +} + +func TestBuildEmptyEntrypointInheritance(t *testing.T) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + 
defer deleteImages(name, name2) + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + + expected := "[/bin/echo]" + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + t.Fatal(err) + } + res, err = inspectField(name2, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + + expected = "[]" + + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - empty entrypoint inheritance") +} + +func TestBuildEmptyEntrypoint(t *testing.T) { + name := "testbuildentrypoint" + defer deleteImages(name) + expected := "[]" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT []`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - empty entrypoint") +} + +func TestBuildEntrypoint(t *testing.T) { + name := "testbuildentrypoint" + expected := "[/bin/echo]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - entrypoint") +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func TestBuildOnBuildLimitedInheritence(t *testing.T) { + var ( + out2, out3 string + ) + { + name1 := "testonbuildtrigger1" + dockerfile1 := ` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + ` + ctx, err := fakeContext(dockerfile1, nil) + if err != nil { + t.Fatal(err) + } + + out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err)) + defer deleteImages(name1) + } + { + name2 := "testonbuildtrigger2" + dockerfile2 := ` + FROM testonbuildtrigger1 + ` + ctx, err := fakeContext(dockerfile2, nil) + if err != nil { + t.Fatal(err) + } + + out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err)) + defer deleteImages(name2) + } + { + name3 := "testonbuildtrigger3" + dockerfile3 := ` + FROM testonbuildtrigger2 + ` + ctx, err := fakeContext(dockerfile3, nil) + if err != nil { + t.Fatal(err) + } + + out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err)) + defer deleteImages(name3) + } + + // ONBUILD should be run in second build. + if !strings.Contains(out2, "ONBUILD PARENT") { + t.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + } + + // ONBUILD should *not* be run in third build. 
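+ // (that is, a trigger fires once, in the direct child, and is not re-inherited further down the chain)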
+ if strings.Contains(out3, "ONBUILD PARENT") { + t.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + } + + logDone("build - onbuild") +} + +func TestBuildWithCache(t *testing.T) { + name := "testbuildwithcache" + defer deleteImages(name) + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - with cache") +} + +func TestBuildWithoutCache(t *testing.T) { + name := "testbuildwithoutcache" + defer deleteImages(name) + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + logDone("build - without cache") +} + +func TestBuildADDLocalFileWithCache(t *testing.T) { + name := "testbuildaddlocalfilewithcache" + defer deleteImages(name) + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add local file with cache") +} + +func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { + name := "testbuildaddmultiplelocalfilewithcache" + defer deleteImages(name) + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo Dockerfile /usr/lib/bla/ + RUN [ "$(cat /usr/lib/bla/foo)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add multiple local files with cache") +} + +func TestBuildADDLocalFileWithoutCache(t *testing.T) { + name := "testbuildaddlocalfilewithoutcache" + defer deleteImages(name) + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + logDone("build - add local file without cache") +} + +func TestBuildCopyDirButNotFile(t *testing.T) { + name := "testbuildcopydirbutnotfile" + defer deleteImages(name) + dockerfile := ` + FROM scratch + COPY dir /tmp/` + ctx, 
err := fakeContext(dockerfile, map[string]string{ + "dir/foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + // Check that adding a file with a similar name doesn't mess with the cache + if err := ctx.Add("dir_file", "hello2"); err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but wasn't") + } + logDone("build - copy dir but not file") +} + +func TestBuildADDCurrentDirWithCache(t *testing.T) { + name := "testbuildaddcurrentdirwithcache" + defer deleteImages(name) + dockerfile := ` + FROM scratch + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + // Check that adding a file invalidates the cache of "ADD ." + if err := ctx.Add("bar", "hello2"); err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + // Check that changing a file invalidates the cache of "ADD ." + if err := ctx.Add("foo", "hello1"); err != nil { + t.Fatal(err) + } + id3, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id2 == id3 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + // Check that rewriting a file with the same content still invalidates the cache of "ADD ." + time.Sleep(1 * time.Second) // wait a second because of mtime precision + if err := ctx.Add("foo", "hello1"); err != nil { + t.Fatal(err) + } + id4, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id3 == id4 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + id5, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id4 != id5 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add current directory with cache") +} + +func TestBuildADDCurrentDirWithoutCache(t *testing.T) { + name := "testbuildaddcurrentdirwithoutcache" + defer deleteImages(name) + dockerfile := ` + FROM scratch + MAINTAINER dockerio + ADD . 
/usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + logDone("build - add current directory without cache") +} + +func TestBuildADDRemoteFileWithCache(t *testing.T) { + name := "testbuildaddremotefilewithcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + id1, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add remote file with cache") +} + +func TestBuildADDRemoteFileWithoutCache(t *testing.T) { + name := "testbuildaddremotefilewithoutcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + id1, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), + false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalidated but hasn't.") + } + logDone("build - add remote file without cache") +} + +func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { + name := "testbuildaddlocalandremotefilewithcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but hasn't.") + } + logDone("build - add local and remote file with cache") +} + +func testContextTar(t *testing.T, compression archive.Compression) { + ctx, err := fakeContext( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`, + map[string]string{ + "foo": "bar", + }, + ) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + t.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + defer deleteImages(name) + buildCmd.Stdin = context + + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + t.Fatalf("build failed to complete: %v %v", out, err) + } + logDone(fmt.Sprintf("build - build an image with a context tar, 
compression: %v", compression)) +} + +func TestBuildContextTarGzip(t *testing.T) { + testContextTar(t, archive.Gzip) +} + +func TestBuildContextTarNoCompression(t *testing.T) { + testContextTar(t, archive.Uncompressed) +} + +func TestBuildNoContext(t *testing.T) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") + buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n") + + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + t.Fatalf("build failed to complete: %v %v", out, err) + } + + out, exitCode, err = cmd(t, "run", "nocontext") + if out != "ok\n" { + t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + } + + deleteImages("nocontext") + logDone("build - build an image with no context") +} + +// TODO: TestCaching +func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { + name := "testbuildaddlocalandremotefilewithoutcache" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + t.Fatal(err) + } + if id1 == id2 { + t.Fatal("The cache should have been invalided but hasn't.") + } + logDone("build - add local and remote file without cache") +} + +func TestBuildWithVolumeOwnership(t *testing.T) { + name := "testbuildimg" + defer deleteImages(name) + + _, err := buildImage(name, + `FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`, + true) + + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + + if expected := "drw-------"; !strings.Contains(out, expected) { + t.Fatalf("expected %s received %s", expected, out) + } + + if expected := "daemon daemon"; !strings.Contains(out, expected) { + t.Fatalf("expected %s received %s", expected, out) + } + + logDone("build - volume ownership") +} + +// testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func TestBuildEntrypointRunCleanup(t *testing.T) { + name := "testbuildcmdcleanup" + defer deleteImages(name) + if _, err := buildImage(name, + `FROM busybox + RUN echo "hello"`, + true); err != nil { + t.Fatal(err) + } + + ctx, err := fakeContext(`FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`, + map[string]string{ + "foo": "hello", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + // Cmd must be cleaned up + if expected := ""; res != expected { + t.Fatalf("Cmd %s, expected %s", res, expected) + } + logDone("build - cleanup cmd after RUN") +} + +func TestBuildForbiddenContextPath(t *testing.T) { + name := "testbuildforbidpath" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD ../../ test/ + `, + map[string]string{ + "test.txt": "test1", + "other.txt": "other", + }) + defer ctx.Close() + 
if err != nil { + t.Fatal(err) + } + + expected := "Forbidden path outside the build context: ../../ " + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - forbidden context path") +} + +func TestBuildADDFileNotFound(t *testing.T) { + name := "testbuildaddnotfound" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD foo /usr/local/bar`, + map[string]string{"bar": "hello"}) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + if !strings.Contains(err.Error(), "foo: no such file or directory") { + t.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - add file not found") +} + +func TestBuildInheritance(t *testing.T) { + name := "testbuildinheritance" + defer deleteImages(name) + + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + t.Fatal(err) + } + ports1, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name), + true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if expected := "[/bin/echo]"; res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + if ports1 != ports2 { + t.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } + logDone("build - inheritance") +} + +func TestBuildFails(t *testing.T) { + name := "testbuildfails" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN sh -c "exit 23"`, + true) + if err != nil { + if !strings.Contains(err.Error(), "returned a non-zero code: 23") { + t.Fatalf("Wrong error %v, must be about non-zero code 23", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - fails") +} + +func TestBuildFailsDockerfileEmpty(t *testing.T) { + name := "testbuildfails" + defer deleteImages(name) + _, err := buildImage(name, ``, true) + if err != nil { + if !strings.Contains(err.Error(), "Dockerfile cannot be empty") { + t.Fatalf("Wrong error %v, must be about empty Dockerfile", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - fails with empty dockerfile") +} + +func TestBuildOnBuild(t *testing.T) { + name := "testbuildonbuild" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD RUN touch foobar`, + true) + if err != nil { + t.Fatal(err) + } + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name), + true) + if err != nil { + t.Fatal(err) + } + logDone("build - onbuild") +} + +func TestBuildOnBuildForbiddenChained(t *testing.T) { + name := "testbuildonbuildforbiddenchained" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD ONBUILD RUN touch foobar`, + true) + if err != nil { + if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { + t.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden chained") +} + +func TestBuildOnBuildForbiddenFrom(t *testing.T) { + name := 
"testbuildonbuildforbiddenfrom" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD FROM scratch`, + true) + if err != nil { + if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about FROM forbidden", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden from") +} + +func TestBuildOnBuildForbiddenMaintainer(t *testing.T) { + name := "testbuildonbuildforbiddenmaintainer" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD MAINTAINER docker.io`, + true) + if err != nil { + if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden maintainer") +} + +// gh #2446 +func TestBuildAddToSymlinkDest(t *testing.T) { + name := "testbuildaddtosymlinkdest" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox + RUN mkdir /foo + RUN ln -s /foo /bar + ADD foo /bar/ + RUN [ -f /bar/foo ] + RUN [ -f /foo/foo ]`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add to symlink destination") +} + +func TestBuildEscapeWhitespace(t *testing.T) { + name := "testbuildescaping" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER "Docker \ +IO " + `, true) + + res, err := inspectField(name, "Author") + + if err != nil { + t.Fatal(err) + } + + if res != "Docker IO " { + t.Fatal("Parsed string did not match the escaped string") + } + + logDone("build - validate escaping whitespace") +} + +func TestBuildDockerignore(t *testing.T) { + name := "testbuilddockerignore" + defer deleteImages(name) + dockerfile := ` + FROM busybox + ADD . /bla + RUN [[ -f /bla/src/x.go ]] + RUN [[ -f /bla/Makefile ]] + RUN [[ ! -e /bla/src/_vendor ]] + RUN [[ ! -e /bla/.gitignore ]] + RUN [[ ! -e /bla/README.md ]] + RUN [[ ! -e /bla/.git ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ".git\npkg\n.gitignore\nsrc/_vendor\n*.md", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - test .dockerignore") +} + +func TestBuildDockerignoringDockerfile(t *testing.T) { + name := "testbuilddockerignoredockerfile" + defer deleteImages(name) + dockerfile := ` + FROM scratch` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + ".dockerignore": "Dockerfile\n", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err = buildImageFromContext(name, ctx, true); err == nil { + t.Fatalf("Didn't get expected error from ignoring Dockerfile") + } + logDone("build - test .dockerignore of Dockerfile") +} + +func TestBuildDockerignoringWholeDir(t *testing.T) { + name := "testbuilddockerignorewholedir" + defer deleteImages(name) + dockerfile := ` + FROM busybox + COPY . / + RUN [[ ! 
-e /.gitignore ]] + RUN [[ -f /Makefile ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".dockerignore": ".*\n", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err = buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - test .dockerignore whole dir with .*") +} + +func TestBuildLineBreak(t *testing.T) { + name := "testbuildlinebreak" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] +RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - line break with \\") +} + +func TestBuildEOLInLine(t *testing.T) { + name := "testbuildeolinline" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] +RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - end of line in dockerfile instruction") +} + +func TestBuildCommentsShebangs(t *testing.T) { + name := "testbuildcomments" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! -x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - comments and shebangs") +} + +func TestBuildUsersAndGroups(t *testing.T) { + name := "testbuildusers" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN id -G +RUN id -Gn +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] + +# Add a "supplementary" group for our dockerio user +RUN echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... 
and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - users and groups") +} + +func TestBuildEnvUsage(t *testing.T) { + name := "testbuildenvusage" + defer deleteImages(name) + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + t.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + logDone("build - environment variables usage") +} + +func TestBuildAddScript(t *testing.T) { + name := "testbuildaddscript" + defer deleteImages(name) + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "test": "#!/bin/sh\necho 'test!' 
> /testfile", + }) + if err != nil { + t.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + logDone("build - add and run script") +} + +func TestBuildAddTar(t *testing.T) { + name := "testbuildaddtar" + defer deleteImages(name) + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN mkdir /existing-directory +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + t.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + t.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + t.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatalf("failed to open destination dockerfile: %v", err) + } + return &FakeContext{Dir: tmpDir} + }() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + } + + logDone("build - ADD tar") +} + +func TestBuildAddTarXz(t *testing.T) { + name := "testbuildaddtarxz" + defer deleteImages(name) + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + t.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + t.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + t.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("failed to close tar archive: %v", err) + } + xzCompressCmd := exec.Command("xz", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + t.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatalf("failed to open destination dockerfile: %v", err) + } + return &FakeContext{Dir: tmpDir} + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + + logDone("build - ADD tar.xz") +} + +func TestBuildAddTarXzGz(t *testing.T) { + name := "testbuildaddtarxzgz" + defer deleteImages(name) + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + 
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + t.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + t.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + t.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + t.Fatal(err, out) + } + + gzipCompressCmd := exec.Command("gzip", "test.tar.xz") + gzipCompressCmd.Dir = tmpDir + out, _, err = runCommandWithOutput(gzipCompressCmd) + if err != nil { + t.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatalf("failed to open destination dockerfile: %v", err) + } + return &FakeContext{Dir: tmpDir} + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + + logDone("build - ADD tar.xz.gz") +} + +func TestBuildFromGIT(t *testing.T) { + name := "testbuildfromgit" + defer deleteImages(name) + git, err := fakeGIT("repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }) + if err != nil { + t.Fatal(err) + } + defer git.Close() + + _, err = buildImageFromPath(name, git.RepoURL, true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + t.Fatal(err) + } + if res != "docker" { + t.Fatalf("Maintainer should be docker, got %s", res) + } + logDone("build - build from GIT") +} + +func TestBuildCleanupCmdOnEntrypoint(t *testing.T) { + name := "testbuildcmdcleanuponentrypoint" + defer deleteImages(name) + if _, err := buildImage(name, + `FROM scratch + CMD ["test"] + ENTRYPOINT ["echo"]`, + true); err != nil { + t.Fatal(err) + } + if _, err := buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name), + true); err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if expected := ""; res != expected { + t.Fatalf("Cmd %s, expected %s", res, expected) + } + res, err = inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if expected := "[cat]"; res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + logDone("build - cleanup cmd on ENTRYPOINT") +} + +func TestBuildClearCmd(t *testing.T) { + name := "testbuildclearcmd" + defer deleteImages(name) + _, err := buildImage(name, + `From scratch + ENTRYPOINT ["/bin/bash"] + CMD []`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != "[]" { + t.Fatalf("Cmd %s, expected %s", res, "[]") + } + logDone("build - clearcmd") +} + +func TestBuildEmptyCmd(t *testing.T) { + name := "testbuildemptycmd" + defer deleteImages(name) + if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != "null" { + t.Fatalf("Cmd %s, expected %s", res, "null") + } + logDone("build 
- empty cmd") +} + +func TestBuildOnBuildOutput(t *testing.T) { + name := "testbuildonbuildparent" + defer deleteImages(name) + if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { + t.Fatal(err) + } + + childname := "testbuildonbuildchild" + defer deleteImages(childname) + + _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) + if err != nil { + t.Fatal(err) + } + + if !strings.Contains(out, "Trigger 0, RUN echo foo") { + t.Fatal("failed to find the ONBUILD output", out) + } + + logDone("build - onbuild output") +} + +func TestBuildInvalidTag(t *testing.T) { + name := "abcd:" + makeRandomString(200) + defer deleteImages(name) + _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) + // if the error doesnt check for illegal tag name, or the image is built + // then this should fail + if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") { + t.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) + } + logDone("build - invalid tag") +} + +func TestBuildCmdShDashC(t *testing.T) { + name := "testbuildcmdshc" + defer deleteImages(name) + if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err, res) + } + + expected := `["/bin/sh","-c","echo cmd"]` + + if res != expected { + t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + + logDone("build - cmd should have sh -c for non-json") +} + +func TestBuildCmdJSONNoShDashC(t *testing.T) { + name := "testbuildcmdjson" + defer deleteImages(name) + if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err, res) + } + + expected := `["echo","cmd"]` + + if res != expected { + t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + + logDone("build - cmd should not have /bin/sh -c for json") +} + +func TestBuildIgnoreInvalidInstruction(t *testing.T) { + name := "testbuildignoreinvalidinstruction" + defer deleteImages(name) + + out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true) + if err != nil { + t.Fatal(err, out) + } + + logDone("build - ignore invalid Dockerfile instruction") +} + +func TestBuildEntrypointInheritance(t *testing.T) { + defer deleteImages("parent", "child") + + if _, err := buildImage("parent", ` + FROM busybox + ENTRYPOINT exit 130 + `, true); err != nil { + t.Fatal(err) + } + + status, _ := runCommand(exec.Command(dockerBinary, "run", "parent")) + + if status != 130 { + t.Fatalf("expected exit code 130 but received %d", status) + } + + if _, err := buildImage("child", ` + FROM parent + ENTRYPOINT exit 5 + `, true); err != nil { + t.Fatal(err) + } + + status, _ = runCommand(exec.Command(dockerBinary, "run", "child")) + + if status != 5 { + t.Fatal("expected exit code 5 but received %d", status) + } + + logDone("build - clear entrypoint") +} + +func TestBuildEntrypointInheritanceInspect(t *testing.T) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + defer deleteImages(name, name2) + + if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { + t.Fatal(err) + } + + if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); 
err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name2, "Config.Entrypoint") + if err != nil { + t.Fatal(err, res) + } + + if res != expected { + t.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) + if err != nil { + t.Fatal(err, out) + } + + expected = "quux" + + if strings.TrimSpace(out) != expected { + t.Fatalf("Expected output is %s, got %s", expected, out) + } + + logDone("build - entrypoint override inheritance properly") +} + +func TestBuildRunShEntrypoint(t *testing.T) { + name := "testbuildentrypoint" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT /bin/echo`, + true) + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", name)) + + if err != nil { + t.Fatal(err, out) + } + + logDone("build - entrypoint with /bin/echo running successfully") +} + +func TestBuildExoticShellInterpolation(t *testing.T) { + name := "testbuildexoticshellinterpolation" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `, false) + if err != nil { + t.Fatal(err) + } + + logDone("build - exotic shell interpolation") +} + +func TestBuildSymlinkBreakout(t *testing.T) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + t.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + t.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + t.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + if _, err := buildImageFromContext(name, &FakeContext{Dir: ctx}, false); err != nil { + t.Fatal(err) + } + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + t.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + t.Fatalf("unexpected error: %v", err) + } + logDone("build - symlink breakout") +} + +func TestBuildXZHost(t *testing.T) { + name := "testbuildxzhost" + defer deleteImages(name) + + ctx, err := fakeContext(` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! 
-e /injected ]`, + map[string]string{ + "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", + "xz": "#!/bin/sh\ntouch /injected", + }) + + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + logDone("build - xz host is being used") +} diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go new file mode 100644 index 00000000..46b99869 --- /dev/null +++ b/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,139 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestCommitAfterContainerIsDone(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + + deleteContainer(cleanedContainerID) + deleteImages(cleanedImageID) + + logDone("commit - echo foo and commit the image") +} + +func TestCommitWithoutPause(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + + deleteContainer(cleanedContainerID) + deleteImages(cleanedImageID) + + logDone("commit - echo foo and commit the image with --pause=false") +} + +func TestCommitNewFile(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "commiter") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", imageID, "cat", "/foo") + + out, _, err := runCommandWithOutput(cmd) + 
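// Running "cat /foo" against the committed image checks that the file written inside the original container survived the commit. + 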
if err != nil { + t.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual != "koye" { + t.Fatalf("expected output koye received %s", actual) + } + + deleteAllContainers() + deleteImages(imageID) + + logDone("commit - commit file and read") +} + +func TestCommitTTY(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + logDone("commit - commit tty") +} + +func TestCommitWithHostBindMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(imageID, err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "bindtest", "true") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + deleteImages(imageID) + + logDone("commit - commit bind mounted file") +} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go new file mode 100644 index 00000000..a5e16bb2 --- /dev/null +++ b/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,411 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func TestCpGarbagePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("../../../../../../../../../../../../", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host 
file -- garbage path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for garbage path") + } + + logDone("cp - garbage paths relative to container's rootfs") +} + +// Check that relative paths are relative to the container's rootfs +func TestCpRelativePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path, _ := filepath.Rel("/", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- relative path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for relative path") + } + + logDone("cp - relative paths relative to container's rootfs") +} + +// Check that absolute paths are relative to the container's rootfs +func TestCpAbsolutePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output 
doesn't match the input for absolute path") + } + + logDone("cp - absolute paths relative to container's rootfs") +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func TestCpAbsoluteSymlink(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("/", "container_path") + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute symlink can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for absolute symlink") + } + + logDone("cp - absolute symlink relative to container's rootfs") +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func TestCpSymlinkComponent(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("/", "container_path", cpTestName) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- 
symlink path component can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for symlink path component") + } + + logDone("cp - symlink path components relative to container's rootfs") +} + +// Check that cp with unprivileged user doesn't return any error +func TestCpUnprivilegedUser(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpdir) + + if err = os.Chmod(tmpdir, 0777); err != nil { + t.Fatal(err) + } + + path := cpTestName + + _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) + if err != nil { + t.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) + } + + logDone("cp - unprivileged user") +} + +func TestCpToDot(t *testing.T) { + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = dockerCmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(cwd) + if err := os.Chdir(tmpdir); err != nil { + t.Fatal(err) + } + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".") + if err != nil { + t.Fatalf("couldn't docker cp to \".\" path: %s", err) + } + content, err := ioutil.ReadFile("./test") + if err != nil { + t.Fatal(err) + } + if string(content) != "lololol\n" { + t.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n") + } + logDone("cp - to dot path") +} diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go new file mode 100644 index 00000000..226a3f5a --- /dev/null +++ b/integration-cli/docker_cli_create_test.go @@ -0,0 +1,116 @@ +package main + +import ( + "encoding/json" + "fmt" + "os/exec" + "testing" + "time" +) + +// Make sure we can create a simple container with some args +func TestCreateArgs(t *testing.T) { + runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + t.Fatalf("Error inspecting the
container: %s", err) + } + if len(containers) != 1 { + t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) + } + + c := containers[0] + if c.Path != "command" { + t.Fatalf("Unexpected container path. Expected command, received: %s", c.Path) + } + + b := false + expected := []string{"arg1", "arg2", "arg with space"} + for i, arg := range expected { + if arg != c.Args[i] { + b = true + break + } + } + if len(c.Args) != len(expected) || b { + t.Fatalf("Unexpected args. Expected %v, received: %v", expected, c.Args) + } + + deleteAllContainers() + + logDone("create - args") +} + +// Make sure we can set hostconfig options too +func TestCreateHostConfig(t *testing.T) { + runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + t.Fatalf("Error inspecting the container: %s", err) + } + if len(containers) != 1 { + t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) + } + + c := containers[0] + if c.HostConfig == nil { + t.Fatalf("Expected HostConfig, got none") + } + + if !c.HostConfig.PublishAllPorts { + t.Fatalf("Expected PublishAllPorts, got false") + } + + deleteAllContainers() + + logDone("create - hostconfig") +} + +// "test123" should be printed by docker create + start +func TestCreateEchoStdout(t *testing.T) { + runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test123\n" { + t.Errorf("container should've printed 'test123', got '%s'", out) + } + + deleteAllContainers() + + logDone("create - echo test123") +} diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go new file mode 100644 index 00000000..6160e57e --- /dev/null +++ b/integration-cli/docker_cli_daemon_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "encoding/json" + "os" + "strings" + "testing" +) + +func TestDaemonRestartWithRunningContainersPorts(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top1: err=%v\n%s", err, out) + } + // --restart=no by default + if out, err := d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top2: err=%v\n%s", err, out) + } + + testRun := func(m map[string]bool, prefix string) { + var format string + for c, shouldRun := range m { + out, err := d.Cmd("ps") + if err != nil { + t.Fatalf("Could not run ps: err=%v\n%q", err, out) + } + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = 
"%scontainer %q is running" + } + if shouldRun != strings.Contains(out, c) { + t.Fatalf(format, prefix, c) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + if err := d.Restart(); err != nil { + t.Fatalf("Could not restart daemon: %v", err) + } + + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") + + logDone("daemon - running containers on daemon restart") +} + +func TestDaemonRestartWithVolumesRefs(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + t.Fatal(err, out) + } + if err := d.Restart(); err != nil { + t.Fatal(err) + } + if _, err := d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox"); err != nil { + t.Fatal(err) + } + if out, err := d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + t.Fatal(err, out) + } + v, err := d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1") + if err != nil { + t.Fatal(err) + } + volumes := make(map[string]string) + json.Unmarshal([]byte(v), &volumes) + if _, err := os.Stat(volumes["/foo"]); err != nil { + t.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err) + } + + logDone("daemon - volume refs are restored") +} + +func TestDaemonStartIptablesFalse(t *testing.T) { + d := NewDaemon(t) + if err := d.Start("--iptables=false"); err != nil { + t.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) + } + d.Stop() + + logDone("daemon - started daemon with iptables=false") +} diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go new file mode 100644 index 00000000..726f2349 --- /dev/null +++ b/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure that an added file shows up in docker diff +func TestDiffFilenameShownInOutput(t *testing.T) { + containerCmd := `echo foo > /root/bar` + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains("A /root/bar", line) { + found = true + break + } + } + if !found { + t.Errorf("couldn't find the new file in docker diff's output: %v", out) + } + deleteContainer(cleanCID) + + logDone("diff - check if created file shows up") +} + +// test to ensure GH #3840 doesn't occur any more +func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { + // this is a list of files which shouldn't show up in `docker diff` + dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"} + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < 20; i++ { + containerCmd := `echo foo > /root/bar` + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + + cleanCID := stripTrailingCharacters(cid) + + 
diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + + deleteContainer(cleanCID) + + for _, filename := range dockerinitFiles { + if strings.Contains(out, filename) { + t.Errorf("found file which should've been ignored %v in diff output", filename) + } + } + } + + logDone("diff - check if ignored files show up in diff") +} + +func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0") + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + deleteContainer(cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/kmsg": true, // lxc + } + + for _, line := range strings.Split(out, "\n") { + if line != "" && !expected[line] { + t.Errorf("'%s' is shown in the diff but shouldn't", line) + } + } + + logDone("diff - ensure that only kmsg and ptmx in diff") +} diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go new file mode 100644 index 00000000..b7f410b1 --- /dev/null +++ b/integration-cli/docker_cli_events_test.go @@ -0,0 +1,217 @@ +package main + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strconv" + "strings" + "testing" + "time" + "unicode" + + "github.com/kr/pty" +) + +func TestEventsUntag(t *testing.T) { + out, _, _ := cmd(t, "images", "-q") + image := strings.Split(out, "\n")[0] + cmd(t, "tag", image, "utest:tag1") + cmd(t, "tag", image, "utest:tag2") + cmd(t, "rmi", "utest:tag1") + cmd(t, "rmi", "utest:tag2") + eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1") + out, _, _ = runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + nEvents := len(events) + // The last element after the split above will be an empty string, so we + // get the two elements before the last, which are the untags we're + // looking for. 
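+ // For illustration only (assumed, not asserted by the test): a matching line looks + // roughly like "2014-10-14T12:00:00Z <image id>: untag", so only the "untag" + // substring is checked below.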
+ for _, v := range events[nEvents-3 : nEvents-1] { + if !strings.Contains(v, "untag") { + t.Fatalf("event should be untag, not %#v", v) + } + } + logDone("events - untags are logged") +} + +func TestEventsPause(t *testing.T) { + out, _, _ := cmd(t, "images", "-q") + image := strings.Split(out, "\n")[0] + cmd(t, "run", "-d", "--name", "testeventpause", image, "sleep", "2") + cmd(t, "pause", "testeventpause") + cmd(t, "unpause", "testeventpause") + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ = runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + if len(events) <= 1 { + t.Fatalf("Missing expected event") + } + + pauseEvent := strings.Fields(events[len(events)-3]) + unpauseEvent := strings.Fields(events[len(events)-2]) + + if pauseEvent[len(pauseEvent)-1] != "pause" { + t.Fatalf("event should be pause, not %#v", pauseEvent) + } + if unpauseEvent[len(unpauseEvent)-1] != "unpause" { + t.Fatalf("event should be unpause, not %#v", unpauseEvent) + } + + logDone("events - pause/unpause is logged") +} + +func TestEventsContainerFailStartDie(t *testing.T) { + out, _, _ := cmd(t, "images", "-q") + image := strings.Split(out, "\n")[0] + eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg") + _, _, err := runCommandWithOutput(eventsCmd) + if err == nil { + t.Fatalf("Container run with command blerg should have failed, but it did not") + } + + eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ = runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + if len(events) <= 1 { + t.Fatalf("Missing expected event") + } + + startEvent := strings.Fields(events[len(events)-3]) + dieEvent := strings.Fields(events[len(events)-2]) + + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + logDone("events - container failed to start logs die") +} + +func TestEventsLimit(t *testing.T) { + for i := 0; i < 30; i++ { + cmd(t, "run", "busybox", "echo", strconv.Itoa(i)) + } + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ := runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + nEvents := len(events) - 1 + if nEvents != 64 { + t.Fatalf("events should be limited to 64, but received %d", nEvents) + } + logDone("events - limited to 64 entries") +} + +func TestEventsContainerEvents(t *testing.T) { + cmd(t, "run", "--rm", "busybox", "true") + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, exitCode, err := runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events := strings.Split(out, "\n") + events = events[:len(events)-1] + if len(events) < 4 { + t.Fatalf("Missing expected event") + } + createEvent := strings.Fields(events[len(events)-4]) + startEvent := strings.Fields(events[len(events)-3]) + dieEvent := strings.Fields(events[len(events)-2]) + destroyEvent := strings.Fields(events[len(events)-1]) + if createEvent[len(createEvent)-1] != "create" { + t.Fatalf("event should be create, not %#v", createEvent) + } + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + if destroyEvent[len(destroyEvent)-1] != "destroy" { + t.Fatalf("event should be destroy, not %#v", destroyEvent) + } + + logDone("events - container create, start, die, destroy is logged") +} + +func TestEventsImageUntagDelete(t *testing.T) { + name := "testimageevents" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + MAINTAINER "docker"`, + true) + if err != nil { + t.Fatal(err) + } + if err := deleteImages(name); err != nil { + t.Fatal(err) + } + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, exitCode, err := runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events := strings.Split(out, "\n") + t.Log(events) + events = events[:len(events)-1] + if len(events) < 2 { + t.Fatalf("Missing expected event") + } + untagEvent := strings.Fields(events[len(events)-2]) + deleteEvent := strings.Fields(events[len(events)-1]) + if untagEvent[len(untagEvent)-1] != "untag" { + t.Fatalf("event should be untag, not %#v", untagEvent) + } + if deleteEvent[len(deleteEvent)-1] != "delete" { + t.Fatalf("event should be delete, not %#v", deleteEvent) + } + logDone("events - image untag, delete is logged") +} + +// #5979 +func TestEventsRedirectStdout(t *testing.T) { + + since := time.Now().Unix() + + cmd(t, "run", "busybox", "true") + + defer deleteAllContainers() + + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("could not create temp file: %v", err) + } + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, time.Now().Unix(), file.Name()) + _, tty, err := pty.Open() + if err != nil { + t.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Run(); err != nil { + t.Fatalf("run err for command %q: %v", command, err) + } + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, c := range scanner.Text() { + if unicode.IsControl(c) { + t.Fatalf("found control character %v", []byte(string(c))) + } + } + } + if err := scanner.Err(); err != nil { + t.Fatalf("Scan err for command %q: %v", command, err) + } + + logDone("events - redirect stdout") +} diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go new file mode 100644 index 00000000..0e012aa4 --- /dev/null +++ b/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,139 @@ +package main + +import ( + "bufio" + "os/exec" + "strings" + "testing" + "time" +) + +func TestExec(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file") + + out, _, err = runCommandWithOutput(execCmd) + errorOut(err, t, out) + + out = strings.Trim(out, "\r\n") + + if expected := "test"; out != expected { + t.Errorf("container exec should've printed %q but printed %q", expected, out) + } + + deleteAllContainers() + + logDone("exec - basic test") +} + +func TestExecInteractive(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100")
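+ // The shell writes /tmp/file and then sleeps, so the container is still + // running when the interactive exec below attaches to it.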
+ out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := execCmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := execCmd.Start(); err != nil { + t.Fatal(err) + } + if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil { + t.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + t.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "test" { + t.Fatalf("Output should be 'test', got '%q'", line) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + finish := make(chan struct{}) + go func() { + if err := execCmd.Wait(); err != nil { + t.Fatal(err) + } + close(finish) + }() + select { + case <-finish: + case <-time.After(1 * time.Second): + t.Fatal("docker exec failed to exit on stdin close") + } + + deleteAllContainers() + + logDone("exec - Interactive test") +} + +func TestExecAfterContainerRestart(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + outStr := strings.TrimSpace(out) + if outStr != "hello" { + t.Errorf("container should've printed hello, instead printed %q", outStr) + } + + deleteAllContainers() + + logDone("exec - exec running container after container restart") +} + +func TestExecAfterDaemonRestart(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top: err=%v\n%s", err, out) + } + + if err := d.Restart(); err != nil { + t.Fatalf("Could not restart daemon: %v", err) + } + + if out, err := d.Cmd("start", "top"); err != nil { + t.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out) + } + + out, err := d.Cmd("exec", "top", "echo", "hello") + if err != nil { + t.Fatalf("Could not exec on container top: err=%v\n%s", err, out) + } + + outStr := strings.TrimSpace(string(out)) + if outStr != "hello" { + t.Errorf("container should've printed hello, instead printed %q", outStr) + } + + logDone("exec - exec running container after daemon restart") +} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 00000000..b044cd83 --- /dev/null +++ b/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "testing" +) + +// export an image and try to import it into a new one +func TestExportContainerAndImportImage(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = 
runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err) + } + + exportCmdTemplate := `%v export %v > /tmp/testexp.tar` + exportCmdFinal := fmt.Sprintf(exportCmdTemplate, dockerBinary, cleanedContainerID) + exportCmd := exec.Command("bash", "-c", exportCmdFinal) + out, _, err = runCommandWithOutput(exportCmd) + errorOut(err, t, fmt.Sprintf("failed to export container: %v %v", out, err)) + + importCmdFinal := `cat /tmp/testexp.tar | docker import - repo/testexp:v1` + importCmd := exec.Command("bash", "-c", importCmdFinal) + out, _, err = runCommandWithOutput(importCmd) + errorOut(err, t, fmt.Sprintf("failed to import image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err)) + + deleteContainer(cleanedContainerID) + deleteImages("repo/testexp:v1") + + os.Remove("/tmp/testexp.tar") + + logDone("export - export a container") + logDone("import - import an image") +} diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go new file mode 100644 index 00000000..8b1a73b5 --- /dev/null +++ b/integration-cli/docker_cli_history_test.go @@ -0,0 +1,85 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// This is a heisen-test. Because the created timestamp of images and the behavior of +// sort is not predictable it doesn't always fail. +func TestBuildHistory(t *testing.T) { + name := "testbuildhistory" + defer deleteImages(name) + _, err := buildImage(name, `FROM busybox +RUN echo "A" +RUN echo "B" +RUN echo "C" +RUN echo "D" +RUN echo "E" +RUN echo "F" +RUN echo "G" +RUN echo "H" +RUN echo "I" +RUN echo "J" +RUN echo "K" +RUN echo "L" +RUN echo "M" +RUN echo "N" +RUN echo "O" +RUN echo "P" +RUN echo "Q" +RUN echo "R" +RUN echo "S" +RUN echo "T" +RUN echo "U" +RUN echo "V" +RUN echo "W" +RUN echo "X" +RUN echo "Y" +RUN echo "Z"`, + true) + + if err != nil { + t.Fatal(err) + } + + out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory")) + errorOut(err, t, fmt.Sprintf("image history failed: %v %v", out, err)) + if err != nil || exitCode != 0 { + t.Fatal("failed to get image history") + } + + actualValues := strings.Split(out, "\n")[1:27] + expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} + + for i := 0; i < 26; i++ { + echoValue := fmt.Sprintf("echo \"%s\"", expectedValues[i]) + actualValue := actualValues[i] + + if !strings.Contains(actualValue, echoValue) { + t.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue) + } + } + + logDone("history - build history") +} + +func TestHistoryExistentImage(t *testing.T) { + historyCmd := exec.Command(dockerBinary, "history", "busybox") + _, exitCode, err := runCommandWithOutput(historyCmd) + if err != nil || exitCode != 0 { + t.Fatal("failed to get image history") + } + logDone("history - history on existent image must not fail") +} + +func TestHistoryNonExistentImage(t *testing.T) { + historyCmd := exec.Command(dockerBinary, "history", "testHistoryNonExistentImage") + _, exitCode, err := runCommandWithOutput(historyCmd) + if err == nil || exitCode == 0 { + t.Fatal("history on a non-existent image didn't result in a 
non-zero exit status") + } + logDone("history - history on non-existent image must fail") +} diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go new file mode 100644 index 00000000..5a7207ce --- /dev/null +++ b/integration-cli/docker_cli_images_test.go @@ -0,0 +1,62 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" + "time" +) + +func TestImagesEnsureImageIsListed(t *testing.T) { + imagesCmd := exec.Command(dockerBinary, "images") + out, _, err := runCommandWithOutput(imagesCmd) + errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + + if !strings.Contains(out, "busybox") { + t.Fatal("images should've listed busybox") + } + + logDone("images - busybox should be listed") +} + +func TestImagesOrderedByCreationDate(t *testing.T) { + defer deleteImages("order:test_a") + defer deleteImages("order:test_c") + defer deleteImages("order:test_b") + id1, err := buildImage("order:test_a", + `FROM scratch + MAINTAINER dockerio1`, true) + if err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + id2, err := buildImage("order:test_c", + `FROM scratch + MAINTAINER dockerio2`, true) + if err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + id3, err := buildImage("order:test_b", + `FROM scratch + MAINTAINER dockerio3`, true) + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) + errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + imgs := strings.Split(out, "\n") + if imgs[0] != id3 { + t.Fatalf("First image must be %s, got %s", id3, imgs[0]) + } + if imgs[1] != id2 { + t.Fatalf("Second image must be %s, got %s", id2, imgs[1]) + } + if imgs[2] != id1 { + t.Fatalf("Third image must be %s, got %s", id1, imgs[2]) + } + + logDone("images - ordering by creation date") +} diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go new file mode 100644 index 00000000..ea001fd4 --- /dev/null +++ b/integration-cli/docker_cli_import_test.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestImportDisplay(t *testing.T) { + server, err := fileServer(map[string]string{ + "/cirros.tar.gz": "/cirros.tar.gz", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + fileURL := fmt.Sprintf("%s/cirros.tar.gz", server.URL) + importCmd := exec.Command(dockerBinary, "import", fileURL) + out, _, err := runCommandWithOutput(importCmd) + if err != nil { + t.Errorf("import failed with errors: %v, output: %q", err, out) + } + + if n := strings.Count(out, "\n"); n != 2 { + t.Fatalf("display is messed up: %d '\\n' instead of 2", n) + } + + logDone("import - cirros was imported and display is fine") +} diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go new file mode 100644 index 00000000..32aa3a21 --- /dev/null +++ b/integration-cli/docker_cli_info_test.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure docker info succeeds +func TestInfoEnsureSucceeds(t *testing.T) { + versionCmd := exec.Command(dockerBinary, "info") + out, exitCode, err := runCommandWithOutput(versionCmd) + errorOut(err, t, fmt.Sprintf("encountered error while running docker info: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to execute docker info") + } + + stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} + + 
for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + t.Errorf("couldn't find string %v in output", linePrefix) + } + } + + logDone("info - verify that it works") +} diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 00000000..30a72204 --- /dev/null +++ b/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,22 @@ +package main + +import ( + "os/exec" + "strings" + "testing" +) + +func TestInspectImage(t *testing.T) { + imageTest := "scratch" + imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest) + + out, exitCode, err := runCommandWithOutput(imagesCmd) + if exitCode != 0 || err != nil { + t.Fatalf("failed to inspect image") + } + if id := strings.TrimSuffix(out, "\n"); id != imageTestID { + t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) + } + logDone("inspect - inspect an image") +} diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go new file mode 100644 index 00000000..6ee246f5 --- /dev/null +++ b/integration-cli/docker_cli_kill_test.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestKillContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) + out, _, err = runCommandWithOutput(killCmd) + errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + + listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") + out, _, err = runCommandWithOutput(listRunningContainersCmd) + errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + + if strings.Contains(out, cleanedContainerID) { + t.Fatal("killed container is still running") + } + + deleteContainer(cleanedContainerID) + + logDone("kill - kill container running sleep 10") +} + +func TestKillDifferentUserContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "sh", "-c", "sleep 10") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) + out, _, err = runCommandWithOutput(killCmd) + errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + + listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") + out, _, err = runCommandWithOutput(listRunningContainersCmd) + errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + + if strings.Contains(out, cleanedContainerID) { + t.Fatal("killed container is still running") + } + + 
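+ // the container above was started with -u daemon, so this verifies that the kill + // still succeeds when issued by a different user, before we clean it up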
deleteContainer(cleanedContainerID) + + logDone("kill - kill container running sleep 10 from a different user") +} diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go new file mode 100644 index 00000000..da6f5ac2 --- /dev/null +++ b/integration-cli/docker_cli_links_test.go @@ -0,0 +1,156 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "testing" + + "github.com/docker/docker/pkg/iptables" +) + +func TestLinksEtcHostsRegularFile(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if !strings.HasPrefix(out, "-") { + t.Errorf("/etc/hosts should be a regular file") + } + + deleteAllContainers() + + logDone("link - /etc/hosts is a regular file") +} + +func TestLinksEtcHostsContentMatch(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + t.Skip("/etc/hosts does not exist, skip this test") + } + + if out != string(hosts) { + t.Errorf("container's /etc/hosts should match the host's /etc/hosts when run with --net=host") + } + + deleteAllContainers() + + logDone("link - /etc/hosts matches hosts copy") +} + +func TestLinksPingUnlinkedContainers(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + exitCode, err := runCommand(runCmd) + + if exitCode == 0 { + t.Fatal("run ping did not fail") + } else if exitCode != 1 { + errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) + } + + logDone("links - ping unlinked container") +} + +func TestLinksPingLinkedContainers(t *testing.T) { + var out string + out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + idA := stripTrailingCharacters(out) + out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + idB := stripTrailingCharacters(out) + cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + cmd(t, "kill", idA) + cmd(t, "kill", idB) + deleteAllContainers() + + logDone("links - ping linked container") +} + +func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { + cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") + + childIP := findContainerIP(t, "child") + parentIP := findContainerIP(t, "parent") + + sourceRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists(sourceRule...) || !iptables.Exists(destinationRule...) { + t.Fatal("Iptables rules not found") + } + + cmd(t, "rm", "--link", "parent/http") + if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...)
{ + t.Fatal("Iptables rules should be removed when unlink") + } + + cmd(t, "kill", "child") + cmd(t, "kill", "parent") + deleteAllContainers() + + logDone("link - verify iptables when link and unlink") +} + +func TestLinksInspectLinksStarted(t *testing.T) { + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + defer deleteAllContainers() + cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") + links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") + if err != nil { + t.Fatal(err) + } + + err = unmarshalJSON([]byte(links), &result) + if err != nil { + t.Fatal(err) + } + + output := convertSliceOfStringsToMap(result) + + equal := deepEqual(expected, output) + + if !equal { + t.Fatalf("Links %s, expected %s", result, expected) + } + logDone("link - links in started container inspect") +} + +func TestLinksInspectLinksStopped(t *testing.T) { + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + defer deleteAllContainers() + cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") + if err != nil { + t.Fatal(err) + } + + err = unmarshalJSON([]byte(links), &result) + if err != nil { + t.Fatal(err) + } + + output := convertSliceOfStringsToMap(result) + + equal := deepEqual(expected, output) + + if !equal { + t.Fatalf("Links %s, but expected %s", result, expected) + } + + logDone("link - links in stopped container inspect") +} diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go new file mode 100644 index 00000000..2407291c --- /dev/null +++ b/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,252 @@ +package main + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/timeutils" +) + +// This used to work, it test a log of PageSize-1 (gh#4851) +func TestLogsContainerSmallerThanPage(t *testing.T) { + testLen := 32767 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo smaller than page size") +} + +// Regression test: When going over the PageSize, it used to panic (gh#4851) +func TestLogsContainerBiggerThanPage(t *testing.T) { + testLen := 32768 + runCmd := 
exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo bigger than page size") +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func TestLogsContainerMuchBiggerThanPage(t *testing.T) { + testLen := 33000 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo much bigger than page size") +} + +func TestLogsTimestamps(t *testing.T) { + testLen := 100 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines := strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(timeutils.RFC3339NanoFixed+" ", ts.FindString(l)) + if err != nil { + t.Fatalf("Failed to parse timestamp from %v: %v", l, err) + } + if l[29] != 'Z' { // ensure we have padded 0's + t.Fatalf("Timestamp isn't padded properly: %s", l) + } + } + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs with timestamps") +} + +func TestLogsSeparateStderr(t *testing.T) { + msg := "stderr_log" + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + stdout, stderr, _, err := 
runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if stdout != "" { + t.Fatalf("Expected empty stdout stream, got %v", stdout) + } + + stderr = strings.TrimSpace(stderr) + if stderr != msg { + t.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - separate stderr (without pseudo-tty)") +} + +func TestLogsStderrInStdout(t *testing.T) { + msg := "stderr_log" + runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if stderr != "" { + t.Fatalf("Expected empty stderr stream, got %v", stderr) + } + + stdout = strings.TrimSpace(stdout) + if stdout != msg { + t.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - stderr in stdout (with pseudo-tty)") +} + +func TestLogsTail(t *testing.T) { + testLen := 100 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines := strings.Split(out, "\n") + + if len(lines) != 6 { + t.Fatalf("Expected log %d lines, received %d\n", 6, len(lines)) + } + + logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines = strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines = strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + deleteContainer(cleanedContainerID) + logDone("logs - logs tail") +} + +func TestLogsFollowStopped(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) + if err := logsCmd.Start(); err != nil { + t.Fatal(err) + } + + c := make(chan struct{}) + go func() { + if err := logsCmd.Wait(); err != nil {
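+ // a non-nil error from Wait here means `docker logs -f` exited abnormally + // instead of returning cleanly once the stopped container's log stream ended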
+ t.Fatal(err) + } + close(c) + }() + + select { + case <-c: + case <-time.After(1 * time.Second): + t.Fatal("Following logs hung after the container stopped") + } + + deleteContainer(cleanedContainerID) + logDone("logs - logs follow stopped container") +} diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go new file mode 100644 index 00000000..3f0fa2b2 --- /dev/null +++ b/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "net" + "os/exec" + "strings" + "testing" +) + +func TestNetworkNat(t *testing.T) { + iface, err := net.InterfaceByName("eth0") + if err != nil { + t.Skipf("Test not running with `make test`. Interface eth0 not found: %s", err) + } + + ifaceAddrs, err := iface.Addrs() + if err != nil || len(ifaceAddrs) == 0 { + t.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs)) + } + + ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) + if err != nil { + t.Fatalf("Error retrieving the IP for eth0: %s", err) + } + + runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run1 failed with errors: %v (%s)", err, out)) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP)) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run2 failed with errors: %v (%s)", err, out)) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to retrieve logs for container: %v %v", cleanedContainerID, err)) + out = strings.Trim(out, "\r\n") + + if expected := "hello world"; out != expected { + t.Fatalf("Unexpected output.
diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go new file mode 100644 index 00000000..ba986b9a --- /dev/null +++ b/integration-cli/docker_cli_port_test.go @@ -0,0 +1,125 @@ +package main + +import ( + "os/exec" + "sort" + "strings" + "testing" +) + +func TestPortList(t *testing.T) { + // one port + runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + firstID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "port", firstID, "80") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { + t.Error("Port list is not correct") + } + + runCmd = exec.Command(dockerBinary, "port", firstID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) { + t.Error("Port list is not correct") + } + runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + // three ports + runCmd = exec.Command(dockerBinary, "run", "-d", + "-p", "9876:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + ID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "port", ID, "80") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { + t.Error("Port list is not correct") + } + + runCmd = exec.Command(dockerBinary, "port", ID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertPortList(t, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) { + t.Error("Port list is not correct") + } + runCmd = exec.Command(dockerBinary, "rm", "-f", ID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + // more than one port mapped to the same container port + runCmd = exec.Command(dockerBinary, "run", "-d", + "-p", "9876:80", + "-p", "9999:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + ID = stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "port", ID, "80") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) { + t.Error("Port list is not correct") + } + + runCmd = exec.Command(dockerBinary, "port", ID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertPortList(t, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "80/tcp -> 0.0.0.0:9999", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) { + t.Error("Port list is not correct\n", out) + } + runCmd = exec.Command(dockerBinary, "rm", "-f", ID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + deleteAllContainers() + + logDone("port - test port list") +} + +func assertPortList(t *testing.T, out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines) != len(expected) { + t.Errorf("Port list %q has %d lines, expected %d", out, len(lines), len(expected)) + return false + } + sort.Strings(lines) + sort.Strings(expected) + + for i := 0; i < len(expected); i++ { + if lines[i] != expected[i] { + t.Errorf("Expected port %q, got %q", expected[i], lines[i]) + return false + } + } + + return true +} diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go new file mode 100644 index 00000000..f2a7b2ab --- /dev/null +++ b/integration-cli/docker_cli_ps_test.go @@ -0,0 +1,362 @@ +package main + +import ( + "os/exec" + "strings" + "testing" + "time" +) + +func TestPsListContainers(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + firstID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + secondID := stripTrailingCharacters(out) + + // not long running + runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + thirdID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + fourthID := stripTrailingCharacters(out) + + // make sure the third one is not running + runCmd = exec.Command(dockerBinary, "wait", thirdID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + // all + runCmd = exec.Command(dockerBinary, "ps", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) { + t.Error("Container list is not in the correct order") + } + + // running + runCmd = exec.Command(dockerBinary, "ps") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, []string{fourthID, secondID, firstID}) { + t.Error("Container list is not in the correct order") + } + + // from here on, the '-a' flag is ignored + + // limit + runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected := []string{fourthID, thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "-n=2") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{fourthID, thirdID, secondID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // before + runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{secondID, firstID} + + if !assertContainerList(out, 
expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since & before + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{thirdID, secondID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since & limit + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{fourthID, thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // before & limit + runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since & before & limit + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + deleteAllContainers() + + logDone("ps - test ps options") +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +func TestPsListContainersSize(t *testing.T) { + name := "test_size" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + id, err := getIDByName(name) + if err != nil { + t.Fatal(err) + } + + runCmd = exec.Command(dockerBinary, "ps", "-s", "-n=1") + wait := make(chan struct{}) + go func() { + out, _, err = 
runCommandWithOutput(runCmd) + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * time.Second): + t.Fatalf("Calling \"docker ps -s\" timed out!") + } + errorOut(err, t, out) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + sizeIndex := strings.Index(lines[0], "SIZE") + idIndex := strings.Index(lines[0], "CONTAINER ID") + foundID := lines[1][idIndex : idIndex+12] + if foundID != id[:12] { + t.Fatalf("Expected id %s, got %s", id[:12], foundID) + } + expectedSize := "2 B" + foundSize := lines[1][sizeIndex:] + if foundSize != expectedSize { + t.Fatalf("Expected size %q, got %q", expectedSize, foundSize) + } + + deleteAllContainers() + logDone("ps - test ps size") +} + +func TestPsListContainersFilterStatus(t *testing.T) { + // FIXME: this should test paused too, but it makes things hang and it's wonky + // because paused containers can't be controlled by signals + + // start exited container + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + firstID := stripTrailingCharacters(out) + + // make sure the exited container is not running + runCmd = exec.Command(dockerBinary, "wait", firstID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + // start running container + runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + secondID := stripTrailingCharacters(out) + + // filter containers by exited + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=exited") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + containerOut := strings.TrimSpace(out) + if containerOut != firstID[:12] { + t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + } + + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + containerOut = strings.TrimSpace(out) + if containerOut != secondID[:12] { + t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out) + } + + deleteAllContainers() + + logDone("ps - test ps filter status") +}
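Both filter tests issue the same query shape, so the lookup could be centralized. A sketch against this suite's existing dockerBinary and runCommandWithOutput helpers (the psFilter name is hypothetical, not part of this patch):

// psFilter runs "docker ps -a -q --no-trunc --filter=<expr>" and returns
// the full container IDs it printed, one per line.
func psFilter(expr string) ([]string, error) {
	cmd := exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter="+expr)
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		return nil, err
	}
	return strings.Split(strings.TrimSpace(out), "\n"), nil
}

TestPsListContainersFilterExited below would then reduce to two psFilter calls plus the ordering checks.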
"--no-trunc", "--filter=exited=0") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + ids := strings.Split(strings.TrimSpace(out), "\n") + if len(ids) != 2 { + t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) + } + if ids[0] != secondZero { + t.Fatalf("First in list should be %q, got %q", secondZero, ids[0]) + } + if ids[1] != firstZero { + t.Fatalf("Second in list should be %q, got %q", firstZero, ids[1]) + } + + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + ids = strings.Split(strings.TrimSpace(out), "\n") + if len(ids) != 2 { + t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) + } + if ids[0] != secondNonZero { + t.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0]) + } + if ids[1] != firstNonZero { + t.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1]) + } + logDone("ps - test ps filter exited") +} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go new file mode 100644 index 00000000..cadabde8 --- /dev/null +++ b/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// FIXME: we need a test for pulling all aliases for an image (issue #8141) + +// pulling an image from the central registry should work +func TestPullImageFromCentralRegistry(t *testing.T) { + pullCmd := exec.Command(dockerBinary, "pull", "scratch") + out, exitCode, err := runCommandWithOutput(pullCmd) + errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("pulling the scratch image from the registry has failed") + } + logDone("pull - pull scratch") +} + +// pulling a non-existing image from the central registry should return a non-zero exit code +func TestPullNonExistingImage(t *testing.T) { + pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") + _, exitCode, err := runCommandWithOutput(pullCmd) + + if err == nil || exitCode == 0 { + t.Fatal("expected non-zero exit status when pulling non-existing image") + } + logDone("pull - pull fooblahblah1234 (non-existing image)") +} diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go new file mode 100644 index 00000000..160bb9e2 --- /dev/null +++ b/integration-cli/docker_cli_push_test.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// these tests need a freshly started empty private docker registry + +// pulling an image from the central registry should work +func TestPushBusyboxImage(t *testing.T) { + // skip this test until we're able to use a registry + t.Skip() + // tag the image to upload it tot he private registry + repoName := fmt.Sprintf("%v/busybox", privateRegistryURL) + tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) + out, exitCode, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("image tagging failed") + } + + pushCmd := exec.Command(dockerBinary, "push", repoName) + out, exitCode, err = runCommandWithOutput(pushCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + deleteImages(repoName) + + if err != nil || exitCode != 0 { + t.Fatal("pushing the image to the private registry has failed") + } + logDone("push - push busybox to private registry") +} + +// pushing an image without a prefix 
should throw an error +func TestPushUnprefixedRepo(t *testing.T) { + // skip this test until we're able to use a registry + t.Skip() + pushCmd := exec.Command(dockerBinary, "push", "busybox") + _, exitCode, err := runCommandWithOutput(pushCmd) + + if err == nil || exitCode == 0 { + t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status") + } + logDone("push - push unprefixed busybox repo --> must fail") +} diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go new file mode 100644 index 00000000..7dc1819f --- /dev/null +++ b/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,127 @@ +package main + +import ( + "os/exec" + "strings" + "testing" + "time" +) + +func TestRestartStoppedContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out != "foobar\n" { + t.Errorf("container should've printed 'foobar'") + } + + runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out != "foobar\nfoobar\n" { + t.Errorf("container should've printed 'foobar' twice") + } + + deleteAllContainers() + + logDone("restart - echo foobar for stopped container") +} + +func TestRestartRunningContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + time.Sleep(1 * time.Second) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out != "foobar\n" { + t.Errorf("container should've printed 'foobar'") + } + + runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + time.Sleep(1 * time.Second) + + if out != "foobar\nfoobar\n" { + t.Errorf("container should've printed 'foobar' twice") + } + + deleteAllContainers() + + logDone("restart - echo foobar for running container") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
+func TestRestartWithVolumes(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out = strings.Trim(out, " \n\r"); out != "1" { + t.Errorf("expected 1 volume, received %s", out) + } + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) + volumes, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, volumes) + + runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out = strings.Trim(out, " \n\r"); out != "1" { + t.Errorf("expected 1 volume after restart, received %s", out) + } + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) + volumesAfterRestart, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, volumesAfterRestart) + + if volumes != volumesAfterRestart { + volumes = strings.Trim(volumes, " \n\r") + volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r") + t.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart) + } + + deleteAllContainers() + + logDone("restart - does not create a new volume on restart") +} diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go new file mode 100644 index 00000000..6c8dc380 --- /dev/null +++ b/integration-cli/docker_cli_rm_test.go @@ -0,0 +1,128 @@ +package main + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestRmContainerWithRemovedVolume(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + if err := os.Remove("/tmp/testing"); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "rm", "-v", "losemyvolumes") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + + deleteAllContainers() + + logDone("rm - removed volume") +} + +func TestRmContainerWithVolume(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "foo", "-v", "/srv", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "rm", "-v", "foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("rm - volume") +} + +func TestRmRunningContainer(t *testing.T) { + createRunningContainer(t, "foo") + + // Test cannot remove running container + cmd := exec.Command(dockerBinary, "rm", "foo") + if _, err := runCommand(cmd); err == nil { + t.Fatalf("Expected error, can't rm a running container") + } + + deleteAllContainers() + + logDone("rm - running container") +} + +func TestRmForceRemoveRunningContainer(t *testing.T) { + createRunningContainer(t, "foo") + + // Stop then remove with -f + cmd := exec.Command(dockerBinary, "rm", "-f", "foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("rm - running container with --force=true") +} 
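The running-container cases above differ only in flags and expectation, so they also fit a small helper in the spirit of this file's createRunningContainer. A sketch (assertRm is a hypothetical name; dockerBinary, runCommand and createRunningContainer are the suite's existing helpers):

// assertRm starts a fresh running container and checks whether "docker rm"
// with the given flags is expected to succeed (it should only with -f).
func assertRm(t *testing.T, name string, flags []string, wantErr bool) {
	createRunningContainer(t, name)
	args := append(append([]string{"rm"}, flags...), name)
	_, err := runCommand(exec.Command(dockerBinary, args...))
	if wantErr && err == nil {
		t.Fatalf("rm %v %s: expected an error, got none", flags, name)
	}
	if !wantErr && err != nil {
		t.Fatalf("rm %v %s: unexpected error: %v", flags, name, err)
	}
}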
+ +func TestRmContainerOrphaning(t *testing.T) { + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["/bin/true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["/bin/true"] + MAINTAINER Integration Tests` + + // build first dockerfile + img1, err := buildImage(img, dockerfile1, true) + if err != nil { + t.Fatalf("Could not build image %s: %v", img, err) + } + // run container on first image + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", img)); err != nil { + t.Fatalf("Could not run image %s: %v: %s", img, err, out) + } + // rebuild dockerfile with a small addition at the end + if _, err := buildImage(img, dockerfile2, true); err != nil { + t.Fatalf("Could not rebuild image %s: %v", img, err) + } + // try to remove the image, which should error out + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", img)); err == nil { + t.Fatalf("Expected to error out removing the image, but succeeded: %s", out) + } + // check that the first image is still present + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) + if err != nil { + t.Fatalf("%v: %s", err, out) + } + if !strings.Contains(out, img1) { + t.Fatalf("Orphaned container (could not find '%s' in docker images): %s", img1, out) + } + + deleteAllContainers() + + logDone("rm - container orphaning") +} + +func TestRmInvalidContainer(t *testing.T) { + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil { + t.Fatal("Expected error on rm unknown container, got none") + } else if !strings.Contains(out, "failed to remove one or more containers") { + t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out) + } + + logDone("rm - delete unknown container") +} + +func createRunningContainer(t *testing.T, name string) { + cmd := exec.Command(dockerBinary, "run", "-dt", "--name", name, "busybox", "top") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } +} diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 00000000..4fb150ba --- /dev/null +++ b/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestRmiWithContainerFails(t *testing.T) { + errSubstr := "is using it" + + // create a container + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + // try to delete the image + runCmd = exec.Command(dockerBinary, "rmi", "busybox") + out, _, err = runCommandWithOutput(runCmd) + if err == nil { + t.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out) + } + if !strings.Contains(out, errSubstr) { + t.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out) + } + + // make sure it didn't delete the busybox name + images, _, _ := cmd(t, "images") + if !strings.Contains(images, "busybox") { + t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) + } + + deleteContainer(cleanedContainerID) + + logDone("rmi - container using image while rmi, should not remove image name") +} + +func TestRmiTag(t *testing.T) { + imagesBefore, _, _ := cmd(t, "images", "-a") + cmd(t, "tag", "busybox", 
"utest:tag1") + cmd(t, "tag", "busybox", "utest/docker:tag2") + cmd(t, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+3 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + } + cmd(t, "rmi", "utest/docker:tag2") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+2 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + cmd(t, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+1 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + cmd(t, "rmi", "utest:tag1") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+0 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + logDone("tag,rmi- tagging the same images multiple times then removing tags") +} + +func TestRmiTagWithExistingContainers(t *testing.T) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", bb, newtag)); err != nil { + t.Fatalf("Could not tag busybox: %v: %s", err, out) + } + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", container, bb, "/bin/true")); err != nil { + t.Fatalf("Could not run busybox: %v: %s", err, out) + } + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", newtag)) + if err != nil { + t.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out) + } + if d := strings.Count(out, "Untagged: "); d != 1 { + t.Fatalf("Expected 1 untagged entry got %d: %q", d, out) + } + + deleteAllContainers() + + logDone("rmi - delete tag with existing containers") +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go new file mode 100644 index 00000000..417368f4 --- /dev/null +++ b/integration-cli/docker_cli_run_test.go @@ -0,0 +1,2442 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/kr/pty" +) + +// "test123" should be printed by docker run +func TestRunEchoStdout(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test123\n" { + t.Errorf("container should've printed 'test123'") + } + + deleteAllContainers() + + logDone("run - echo test123") +} + +// "test" should be printed +func TestRunEchoStdoutWithMemoryLimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-m", "4m", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.Trim(out, "\r\n") + + if expected := "test"; out != expected { + t.Errorf("container should've printed %q but printed %q", expected, out) + + } + + deleteAllContainers() + + logDone("run - echo with memory limit") +} + +// "test" should be printed +func TestRunEchoStdoutWitCPULimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-c", 
"1000", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + } + + deleteAllContainers() + + logDone("run - echo with CPU limit") +} + +// "test" should be printed +func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "4m", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test\n" { + t.Errorf("container should've printed 'test', got %q instead", out) + } + + deleteAllContainers() + + logDone("run - echo with CPU and memory limit") +} + +// "test" should be printed +func TestRunEchoNamedContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + } + + if err := deleteContainer("testfoonamedcontainer"); err != nil { + t.Errorf("failed to remove the named container: %v", err) + } + + deleteAllContainers() + + logDone("run - echo with named container") +} + +// docker run should not leak file descriptors +func TestRunLeakyFileDescriptors(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "ls", "-C", "/proc/self/fd") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + t.Errorf("container should've printed '0 1 2 3', not: %s", out) + } + + deleteAllContainers() + + logDone("run - check file descriptor leakage") +} + +// it should be possible to ping Google DNS resolver +// this will fail when Internet access is unavailable +func TestRunPingGoogle(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + errorOut(err, t, "container should've been able to ping 8.8.8.8") + + deleteAllContainers() + + logDone("run - ping 8.8.8.8") +} + +// the exit code should be 0 +// some versions of lxc might make this test fail +func TestRunExitCodeZero(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "true") + exitCode, err := runCommand(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + + if exitCode != 0 { + t.Errorf("container should've exited with exit code 0") + } + + deleteAllContainers() + + logDone("run - exit with 0") +} + +// the exit code should be 1 +// some versions of lxc might make this test fail +func TestRunExitCodeOne(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "false") + exitCode, err := runCommand(runCmd) + if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { + t.Fatal(err) + } + if exitCode != 1 { + t.Errorf("container should've exited with exit code 1") + } + + deleteAllContainers() + + logDone("run - exit with 1") +} + +// it should be possible to pipe in data via stdin to a process running in 
a container +// some versions of lxc might make this test fail +func TestRunStdinPipe(t *testing.T) { + runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + + waitCmd := exec.Command(dockerBinary, "wait", out) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + logsCmd := exec.Command(dockerBinary, "logs", out) + containerLogs, _, err := runCommandWithOutput(logsCmd) + errorOut(err, t, fmt.Sprintf("error thrown while trying to get container logs: %s", err)) + + containerLogs = stripTrailingCharacters(containerLogs) + + if containerLogs != "blahblah" { + t.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + rmCmd := exec.Command(dockerBinary, "rm", out) + _, _, err = runCommandWithOutput(rmCmd) + errorOut(err, t, fmt.Sprintf("rm failed to remove container %s", err)) + + deleteAllContainers() + + logDone("run - pipe in with -i -a stdin") +} + +// the container's ID should be printed when starting a container in detached mode +func TestRunDetachedContainerIDPrinting(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + + waitCmd := exec.Command(dockerBinary, "wait", out) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + rmCmd := exec.Command(dockerBinary, "rm", out) + rmOut, _, err := runCommandWithOutput(rmCmd) + errorOut(err, t, "rm failed to remove container") + + rmOut = stripTrailingCharacters(rmOut) + if rmOut != out { + t.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } + + deleteAllContainers() + + logDone("run - print container ID in detached mode") +} + +// the working directory should be set correctly +func TestRunWorkingDirectory(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + if out != "/root" { + t.Errorf("-w failed to set working directory") + } + + runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + out = stripTrailingCharacters(out) + + if out != "/root" { + t.Errorf("--workdir failed to set working directory") + } + + deleteAllContainers() + + logDone("run - run with working directory set by -w") + logDone("run - run with working directory set by --workdir") +} + +// pinging Google's DNS resolver should fail when we disable the networking +func TestRunWithoutNetworking(t *testing.T) { + runCmd := 
exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + if exitCode != 1 { + t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + if exitCode != 1 { + t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + deleteAllContainers() + + logDone("run - disable networking with --net=none") + logDone("run - disable networking with -n=false") +} + +// Regression test for #4741 +func TestRunWithVolumesAsFiles(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("1", out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("2", out, stderr, err) + } + deleteAllContainers() + + logDone("run - regression test for #4741 - volumes from as files") +} + +// Regression test for #4979 +func TestRunWithVolumesFromExited(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("1", out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("2", out, stderr, err) + } + deleteAllContainers() + + logDone("run - regression test for #4979 - volumes-from on exited container") +} + +// Regression test for #4830 +func TestRunWithRelativePath(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true") + if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil { + t.Fatalf("relative path should result in an error") + } + + deleteAllContainers() + + logDone("run - volume with relative path") +} + +func TestRunVolumesMountedAsReadonly(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatalf("run should fail because volume is ro: exit code %d", code) + } + + deleteAllContainers() + + logDone("run - volumes as readonly mount") +} + +func TestRunVolumesFromInReadonlyMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatalf("run should fail because volume is ro: exit code %d", code) + } + + deleteAllContainers() + + logDone("run - volumes from as readonly mount") +} + +// Regression 
test for #1201 +func TestRunVolumesFromInReadWriteMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:bar", "busybox", "touch", "/test/file") + if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "Invalid mode for volumes-from: bar") { + t.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err) + } + + deleteAllContainers() + + logDone("run - volumes from as read write mount") +} + +func TestVolumesFromGetsProperMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + // Expect this "rw" mode to be ignored since the inherited volume is "ro" + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") + if _, err := runCommand(cmd); err == nil { + t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + // Expect this to be read-only since both are "ro" + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file") + if _, err := runCommand(cmd); err == nil { + t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } + + deleteAllContainers() + + logDone("run - volumes from ignores `rw` if inherited volume is `ro`") +} + +// Test for #1351 +func TestRunApplyVolumesFromBeforeVolumes(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + + deleteAllContainers() + + logDone("run - volumes from mounted first") +} + +func TestRunMultipleVolumesFrom(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", + "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - multiple volumes from") +} + +// this test 
verifies the ID format for the container +func TestRunVerifyContainerID(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, exit, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + if exit != 0 { + t.Fatalf("expected exit code 0 received %d", exit) + } + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + t.Fatal(err) + } + if !match { + t.Fatalf("Invalid container ID: %s", out) + } + + deleteAllContainers() + + logDone("run - verify container ID") +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func TestRunCreateVolume(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - create docker managed volume") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func TestRunCreateVolumeWithSymlink(t *testing.T) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-createvolumewithsymlink", "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN mkdir /foo && ln -s /foo /bar`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + t.Fatalf("could not build 'docker-test-createvolumewithsymlink': %v", err) + } + + cmd := exec.Command(dockerBinary, "run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", "docker-test-createvolumewithsymlink", "sh", "-c", "mount | grep -q /foo/foo") + exitCode, err := runCommand(cmd) + if err != nil || exitCode != 0 { + t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + var volPath string + cmd = exec.Command(dockerBinary, "inspect", "-f", "{{range .Volumes}}{{.}}{{end}}", "test-createvolumewithsymlink") + volPath, exitCode, err = runCommandWithOutput(cmd) + if err != nil || exitCode != 0 { + t.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode) + } + + cmd = exec.Command(dockerBinary, "rm", "-v", "test-createvolumewithsymlink") + exitCode, err = runCommand(cmd) + if err != nil || exitCode != 0 { + t.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + f, err := os.Open(volPath) + defer f.Close() + if !os.IsNotExist(err) { + t.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } + + deleteImages("docker-test-createvolumewithsymlink") + deleteAllContainers() + + logDone("run - create volume with symlink") +} + +// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
+func TestRunVolumesFromSymlinkPath(t *testing.T) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-volumesfromsymlinkpath", "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN mkdir /baz && ln -s /baz /foo + VOLUME ["/foo/bar"]`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", "docker-test-volumesfromsymlinkpath") + exitCode, err := runCommand(cmd) + if err != nil || exitCode != 0 { + t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls /foo | grep -q bar") + exitCode, err = runCommand(cmd) + if err != nil || exitCode != 0 { + t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + deleteImages("docker-test-volumesfromsymlinkpath") + deleteAllContainers() + + logDone("run - volumes-from symlink path") +} + +func TestRunExitCode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72") + + exit, err := runCommand(cmd) + if err == nil { + t.Fatal("expected a non-nil error for the non-zero exit code") + } + if exit != 72 { + t.Fatalf("expected exit code 72 received %d", exit) + } + + deleteAllContainers() + + logDone("run - correct exit code") +} + +func TestRunUserDefaultsToRoot(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + t.Fatalf("expected root user got %s", out) + } + deleteAllContainers() + + logDone("run - default user") +} + +func TestRunUserByName(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + t.Fatalf("expected root user got %s", out) + } + deleteAllContainers() + + logDone("run - user by name") +} + +func TestRunUserByID(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + t.Fatalf("expected daemon user got %s", out) + } + deleteAllContainers() + + logDone("run - user by id") +} + +func TestRunUserByIDBig(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("expected an error, got none:", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + t.Fatalf("expected error about uids range, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, id too big") +} + +func TestRunUserByIDNegative(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("expected an error, got none:", out) + } + if !strings.Contains(out, "Uids and gids must be in range") { + t.Fatalf("expected error about uids range, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, id negative") +}
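The two out-of-range cases above differ only in the -u value, so they also fit one loop. A sketch over the same suite helpers (the test name is hypothetical; the expected error message is copied from the tests above):

func TestRunUserByIDOutOfRange(t *testing.T) {
	for _, uid := range []string{"2147483648", "-1"} {
		cmd := exec.Command(dockerBinary, "run", "-u", uid, "busybox", "id")
		out, _, err := runCommandWithOutput(cmd)
		if err == nil {
			t.Fatalf("uid %s: expected an error, got output %q", uid, out)
		}
		if !strings.Contains(out, "Uids and gids must be in range") {
			t.Fatalf("uid %s: expected a uid-range error, got %q", uid, out)
		}
	}
	deleteAllContainers()
	logDone("run - user by id, out of range")
}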
+ +func TestRunUserByIDZero(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + t.Fatalf("expected root user with wheel group, got %s", out) + } + deleteAllContainers() + + logDone("run - user by id, zero uid") +} + +func TestRunUserNotFound(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") + + _, err := runCommand(cmd) + if err == nil { + t.Fatal("unknown user should cause container to fail") + } + deleteAllContainers() + + logDone("run - user not found") +} + +func TestRunTwoConcurrentContainers(t *testing.T) { + group := sync.WaitGroup{} + group.Add(2) + + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2") + if _, err := runCommand(cmd); err != nil { + // t.Fatal must not be called from a goroutine other than the test's own + t.Error(err) + } + }() + } + + group.Wait() + + deleteAllContainers() + + logDone("run - two concurrent containers") +} + +func TestRunEnvironment(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") + cmd.Env = append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + actualEnv := strings.Split(out, "\n") + if actualEnv[len(actualEnv)-1] == "" { + actualEnv = actualEnv[:len(actualEnv)-1] + } + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } + + deleteAllContainers() + + logDone("run - verify environment") +} + +func TestRunContainerNetwork(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - test container network via ping") +} + +// Issue #4681 +func TestRunLoopbackWhenNetworkDisabled(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - test container loopback when networking disabled") +} + +func TestRunNetHostNotAllowedWithLinks(t *testing.T) { + _, _, err := cmd(t, "run", "--name", "linked", "busybox", "true") + + cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true") + _, _, err = runCommandWithOutput(cmd) + if err == nil { + t.Fatal("Expected error") + } + + deleteAllContainers() + + logDone("run - don't allow --net=host to be used with links") +} + +func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + t.Fatalf("Wrong interface count in container: %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } + + deleteAllContainers() + + logDone("run - test loopback only exists when networking disabled") +}
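The interface count above comes from splitting the one-line-per-interface output of `ip -o`; the counting step is plain string handling. A sketch of that step in isolation (the function name is illustrative):

// countNonEmptyLines returns the number of non-empty lines in out, which
// for "ip -o -4 a show up" equals the number of interfaces that are up.
func countNonEmptyLines(out string) int {
	count := 0
	for _, l := range strings.Split(out, "\n") {
		if l != "" {
			count++
		}
	}
	return count
}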
+ +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h <hostname>". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func TestRunFullHostnameSet(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-h", "foo.bar.baz", "busybox", "hostname") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + t.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } + deleteAllContainers() + + logDone("run - test fully qualified hostname set with -h") +} + +func TestRunPrivilegedCanMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test privileged can mknod") +} + +func TestRunUnPrivilegedCanMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test un-privileged can mknod") +} + +func TestRunCapDropInvalid(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + logDone("run - test --cap-drop=CHPASS invalid") +} + +func TestRunCapDropCannotMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=MKNOD cannot mknod") +} + +func TestRunCapDropCannotMknodLowerCase(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=mknod cannot mknod lowercase") +} + +func TestRunCapDropALLCannotMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=ALL cannot mknod") +}
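The mknod capability cases all run the same `mknod /tmp/sda b 8 0 && echo ok` probe under different flag sets, so they collapse naturally into a table. A sketch using the probe and flags from the tests above (the test name is hypothetical):

func TestRunMknodCapTable(t *testing.T) {
	cases := []struct {
		flags  []string
		wantOK bool
	}{
		{[]string{"--cap-drop=MKNOD"}, false},
		{[]string{"--cap-drop=mknod"}, false},
		{[]string{"--cap-drop=ALL"}, false},
		{[]string{"--cap-drop=ALL", "--cap-add=MKNOD"}, true},
	}
	for _, c := range cases {
		args := append([]string{"run"}, c.flags...)
		args = append(args, "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
		out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
		gotOK := err == nil && strings.Trim(out, "\r\n") == "ok"
		if gotOK != c.wantOK {
			t.Fatalf("flags %v: mknod succeeded=%v, want %v (output %q)", c.flags, gotOK, c.wantOK, out)
		}
	}
	deleteAllContainers()
	logDone("run - mknod capability table")
}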
cannot mknod") +} + +func TestRunCapDropALLAddMknodCannotMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=ALL --cap-add=MKNOD can mknod") +} + +func TestRunCapAddInvalid(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + logDone("run - test --cap-add=CHPASS invalid") +} + +func TestRunCapAddCanDownInterface(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-add=NET_ADMIN can set eth0 down") +} + +func TestRunCapAddALLCanDownInterface(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-add=ALL can set eth0 down") +} + +func TestRunCapAddALLDropNetAdminCanDownInterface(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-add=ALL --cap-drop=NET_ADMIN cannot set eth0 down") +} + +func TestRunPrivilegedCanMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test privileged can mount") +} + +func TestRunUnPrivilegedCannotMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test un-privileged cannot mount") +} + +func TestRunSysNotWritableInNonPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/sys/kernel/profiling") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatal("sys should not be writable in a non privileged container") + } + + deleteAllContainers() + + logDone("run - sys not writable in non privileged container") +} + +func 
TestRunSysWritableInPrivilegedContainers(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling")
+	if code, err := runCommand(cmd); err != nil || code != 0 {
+		t.Fatal("sys should be writable in privileged container")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - sys writable in privileged container")
+}
+
+func TestRunProcNotWritableInNonPrivilegedContainers(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/proc/sysrq-trigger")
+	if code, err := runCommand(cmd); err == nil || code == 0 {
+		t.Fatal("proc should not be writable in a non privileged container")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - proc not writable in non privileged container")
+}
+
+func TestRunProcWritableInPrivilegedContainers(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger")
+	if code, err := runCommand(cmd); err != nil || code != 0 {
+		t.Fatal("proc should be writable in privileged container")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - proc writable in privileged container")
+}
+
+func TestRunWithCpuset(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true")
+	if code, err := runCommand(cmd); err != nil || code != 0 {
+		t.Fatalf("container should run successfully with cpuset of 0: %s", err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - cpuset 0")
+}
+
+func TestRunDeviceNumbers(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	deviceLineFields := strings.Fields(out)
+	deviceLineFields[6] = ""
+	deviceLineFields[7] = ""
+	deviceLineFields[8] = ""
+	expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"}
+
+	if !(reflect.DeepEqual(deviceLineFields, expected)) {
+		t.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - test device numbers")
+}
+
+func TestRunThatCharacterDevicesActLikeCharacterDevices(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual[0] == '0' {
+		t.Fatalf("expected a new file called /zero to be created that is greater than 0 bytes long, but du says: %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test that character devices work.")
+}
+
+func TestRunUnprivilegedWithChroot(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "true")
+
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - unprivileged with chroot")
+}
+
+func TestRunAddingOptionalDevices(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" {
+		t.Fatalf("expected output /dev/nulo, received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --device argument")
+}
+
+func TestRunModeHostname(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-h=testhostname", "busybox",
"cat", "/etc/hostname") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + t.Fatalf("expected 'testhostname', but says: %q", actual) + } + + cmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + hostname, err := os.Hostname() + if err != nil { + t.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + t.Fatalf("expected %q, but says: '%s'", hostname, actual) + } + + deleteAllContainers() + + logDone("run - hostname and several network modes") +} + +func TestRunRootWorkdir(t *testing.T) { + s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd") + if err != nil { + t.Fatal(s, err) + } + if s != "/\n" { + t.Fatalf("pwd returned %q (expected /\\n)", s) + } + + deleteAllContainers() + + logDone("run - workdir /") +} + +func TestRunAllowBindMountingRoot(t *testing.T) { + s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") + if err != nil { + t.Fatal(s, err) + } + + deleteAllContainers() + + logDone("run - bind mount / as volume") +} + +func TestRunDisallowBindMountingRootToRoot(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/:/", "busybox", "ls", "/host") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(out, err) + } + + deleteAllContainers() + + logDone("run - bind mount /:/ as volume should fail") +} + +// Test recursive bind mount works by default +func TestRunWithVolumesIsRecursive(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + if err := os.MkdirAll(tmpfsDir, 0777); err != nil { + t.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err) + } + if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { + t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) + } + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal(out, stderr, err) + } + if !strings.Contains(out, filepath.Base(f.Name())) { + t.Fatal("Recursive bind mount test failed. 
Expected file not found")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - volumes are bind mounted recursively")
+}
+
+func TestRunDnsDefaultOptions(t *testing.T) {
+	// ci server has default resolv.conf
+	// so rewrite it for the test
+	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if os.IsNotExist(err) {
+		t.Fatalf("/etc/resolv.conf does not exist")
+	}
+
+	// test with file
+	tmpResolvConf := []byte("nameserver 127.0.0.1")
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
+		t.Fatal(err)
+	}
+	// put the old resolvconf back
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	cmd := exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf")
+
+	actual, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Error(err, actual)
+		return
+	}
+
+	// check that the actual defaults are there
+	// if we ever change the defaults from google dns, this will break
+	expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4"
+	if actual != expected {
+		t.Errorf("expected resolv.conf to be: %q, but was: %q", expected, actual)
+		return
+	}
+
+	deleteAllContainers()
+
+	logDone("run - dns default options")
+}
+
+func TestRunDnsOptions(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
+	if actual != "nameserver 127.0.0.1 search mydomain" {
+		t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf")
+
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
+	if actual != "nameserver 127.0.0.1" {
+		t.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual)
+	}
+
+	logDone("run - dns options")
+}
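+// NOTE: the next test leans on the pkg/resolvconf helpers already used above.
+// As a rough, illustrative sketch of the behaviour these tests assume (not
+// code the suite itself runs):
+//
+//	conf := []byte("search example.com\nnameserver 8.8.8.8\nnameserver 8.8.4.4")
+//	resolvconf.GetNameservers(conf)   // one entry per "nameserver" line, in file order
+//	resolvconf.GetSearchDomains(conf) // the tokens of the "search" line, e.g. "example.com"
+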
+func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) {
+	var out string
+
+	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if os.IsNotExist(err) {
+		t.Fatalf("/etc/resolv.conf does not exist")
+	}
+
+	hostNameservers := resolvconf.GetNameservers(origResolvConf)
+	hostSearch := resolvconf.GetSearchDomains(origResolvConf)
+
+	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
+
+	if out, _, err = runCommandWithOutput(cmd); err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" {
+		t.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
+	}
+
+	actualSearch := resolvconf.GetSearchDomains([]byte(out))
+	if len(actualSearch) != len(hostSearch) {
+		t.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
+	}
+	for i := range actualSearch {
+		if actualSearch[i] != hostSearch[i] {
+			t.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
+		}
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
+
+	if out, _, err = runCommandWithOutput(cmd); err != nil {
+		t.Fatal(err, out)
+	}
+
+	actualNameservers := resolvconf.GetNameservers([]byte(out))
+	if len(actualNameservers) != len(hostNameservers) {
+		t.Fatalf("expected %d nameserver(s), but it has: %d", len(hostNameservers), len(actualNameservers))
+	}
+	for i := range actualNameservers {
+		if actualNameservers[i] != hostNameservers[i] {
+			t.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i])
+		}
+	}
+
+	if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
+		t.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
+	}
+
+	// test with file
+	tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
+		t.Fatal(err)
+	}
+	// put the old resolvconf back
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if os.IsNotExist(err) {
+		t.Fatalf("/etc/resolv.conf does not exist")
+	}
+
+	hostNameservers = resolvconf.GetNameservers(resolvConf)
+	hostSearch = resolvconf.GetSearchDomains(resolvConf)
+
+	cmd = exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf")
+
+	if out, _, err = runCommandWithOutput(cmd); err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
+		t.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
+	}
+
+	actualSearch = resolvconf.GetSearchDomains([]byte(out))
+	if len(actualSearch) != len(hostSearch) {
+		t.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
+	}
+	for i := range actualSearch {
+		if actualSearch[i] != hostSearch[i] {
+			t.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
+		}
+	}
+
+	deleteAllContainers()
+
+	logDone("run - dns options based on host resolv.conf")
+}
+
+func TestRunAddHost(t *testing.T) {
+	defer deleteAllContainers()
+	cmd := exec.Command(dockerBinary, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actual := strings.Trim(out, "\r\n")
+	if actual != "86.75.30.9\textra" {
+		t.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
+	}
+
+	logDone("run - add-host option")
+}
+
+// Regression test for #6983
+func TestRunAttachStdErrOnlyTTYMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stderr", "busybox", "true")
+
+	exitCode, err := runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	} else if exitCode != 0 {
+		t.Fatal("Container should have exited with exit code 0")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - Attach stderr only with -t")
+}
+
+// Regression test for #6983
+func TestRunAttachStdOutOnlyTTYMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "busybox", "true")
+
+	exitCode, err := runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	} else if exitCode != 0 {
+		t.Fatal("Container should have exited with exit code 0")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - Attach stdout only with -t")
+}
+
+// Regression test for #6983
+func TestRunAttachStdOutAndErrTTYMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
+
+	exitCode, err := runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	} else if exitCode != 0 {
+		t.Fatal("Container should have exited with exit code 0")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - Attach stderr and stdout with -t")
+}
+
+func TestRunState(t *testing.T) {
+	defer deleteAllContainers()
+	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	id := strings.TrimSpace(out)
+	state, err := inspectField(id, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "true" {
+		t.Fatal("Container state is 'not running'")
+	}
+	pid1, err := inspectField(id, "State.Pid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pid1 == "0" {
+		t.Fatal("Container state Pid 0")
+	}
+
+	cmd = exec.Command(dockerBinary, "stop", id)
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	state, err = inspectField(id, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "false" {
+		t.Fatal("Container state is 'running'")
+	}
+	pid2, err := inspectField(id, "State.Pid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pid2 == pid1 {
+		t.Fatalf("Container state Pid %s should have changed after the stop, but is still %s", pid2, pid1)
+	}
+
+	cmd = exec.Command(dockerBinary, "start", id)
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	state, err = inspectField(id, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "true" {
+		t.Fatal("Container state is 'not running'")
+	}
+	pid3, err := inspectField(id, "State.Pid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pid3 == pid1 {
+		t.Fatalf("Container state Pid %s should differ from the old Pid %s after a restart", pid3, pid1)
+	}
+	logDone("run - test container state.")
+}
+
+// Test for #1737
+func TestRunCopyVolumeUidGid(t *testing.T) {
+	name := "testrunvolumesuidgid"
+	defer deleteImages(name)
+	defer deleteAllContainers()
+	_, err := buildImage(name,
+		`FROM busybox
+		RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+		RUN echo 'dockerio:x:1001:' >> /etc/group
+		RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test that the uid and gid is copied from the image to the volume
+	cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	out = strings.TrimSpace(out)
+	if out != "dockerio:dockerio" {
+		t.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
+	}
+
+	logDone("run - copy uid/gid for volume")
+}
+
+// Test for #1582
+func TestRunCopyVolumeContent(t *testing.T) {
+	name := "testruncopyvolumecontent"
+	defer deleteImages(name)
+	defer deleteAllContainers()
+	_, err := buildImage(name,
+		`FROM busybox
+		RUN mkdir -p /hello/local && echo hello > /hello/local/world`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test that the content is copied from the image to the volume
+	cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "find /hello")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
+		t.Fatal("Container failed to transfer content to volume")
+	}
+	logDone("run - copy volume content")
+}
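+// NOTE: the next test assumes the documented `docker run --entrypoint`
+// semantics: overriding the entrypoint on the command line also drops the
+// image's CMD, so only the override runs. Illustrative sketch (not executed
+// by the suite):
+//
+//	docker run <image>                     # runs ENTRYPOINT + CMD as built
+//	docker run --entrypoint whoami <image> # runs just `whoami`; CMD is discarded
+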
["echo"] + CMD ["testingpoint"]`, + true); err != nil { + t.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--entrypoint", "whoami", name) + out, exit, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error: %v, out: %q", err, out) + } + if exit != 0 { + t.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + } + out = strings.TrimSpace(out) + if out != "root" { + t.Fatalf("Expected output root, got %q", out) + } + logDone("run - cleanup cmd on --entrypoint") +} + +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func TestRunWorkdirExistsAndIsFile(t *testing.T) { + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "run", "-w", "/bin/cat", "busybox") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) { + t.Fatalf("Docker must complains about making dir, but we got out: %s, exit: %d, err: %s", out, exit, err) + } + logDone("run - error on existing file for workdir") +} + +func TestRunExitOnStdinClose(t *testing.T) { + name := "testrunexitonstdinclose" + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat") + + stdin, err := runCmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + t.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + t.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + t.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + t.Fatalf("Output should be 'hello', got '%q'", line) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + finish := make(chan struct{}) + go func() { + if err := runCmd.Wait(); err != nil { + t.Fatal(err) + } + close(finish) + }() + select { + case <-finish: + case <-time.After(1 * time.Second): + t.Fatal("docker run failed to exit on stdin close") + } + state, err := inspectField(name, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "false" { + t.Fatal("Container must be stopped after stdin closing") + } + logDone("run - exit on stdin closing") +} + +// Test for #2267 +func TestRunWriteHostsFileAndNotCommit(t *testing.T) { + name := "writehosts" + cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "test2267") { + t.Fatal("/etc/hosts should contain 'test2267'") + } + + cmd = exec.Command(dockerBinary, "diff", name) + if err != nil { + t.Fatal(err, out) + } + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if len(strings.Trim(out, "\r\n")) != 0 { + t.Fatal("diff should be empty") + } + + logDone("run - write to /etc/hosts and not commited") +} + +// Test for #2267 +func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { + name := "writehostname" + cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "test2267") { + t.Fatal("/etc/hostname should contain 'test2267'") + } + + cmd = exec.Command(dockerBinary, "diff", name) + if err != 
+// Test for #2267
+func TestRunWriteHostnameFileAndNotCommit(t *testing.T) {
+	name := "writehostname"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "test2267") {
+		t.Fatal("/etc/hostname should contain 'test2267'")
+	}
+
+	cmd = exec.Command(dockerBinary, "diff", name)
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if len(strings.Trim(out, "\r\n")) != 0 {
+		t.Fatal("diff should be empty")
+	}
+
+	logDone("run - write to /etc/hostname and not committed")
+}
+
+// Test for #2267
+func TestRunWriteResolvFileAndNotCommit(t *testing.T) {
+	name := "writeresolv"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "test2267") {
+		t.Fatal("/etc/resolv.conf should contain 'test2267'")
+	}
+
+	cmd = exec.Command(dockerBinary, "diff", name)
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if len(strings.Trim(out, "\r\n")) != 0 {
+		t.Fatal("diff should be empty")
+	}
+
+	logDone("run - write to /etc/resolv.conf and not committed")
+}
+
+func TestRunWithBadDevice(t *testing.T) {
+	name := "baddevice"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal("Run should fail with bad device")
+	}
+	expected := `"/etc": not a device node`
+	if !strings.Contains(out, expected) {
+		t.Fatalf("Output should contain %q, actual out: %q", expected, out)
+	}
+	logDone("run - error with bad device")
+}
+
+func TestRunEntrypoint(t *testing.T) {
+	name := "entrypoint"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	expected := "foobar"
+	if out != expected {
+		t.Fatalf("Output should be %q, actual out: %q", expected, out)
+	}
+	logDone("run - entrypoint")
+}
+
+func TestRunBindMounts(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "docker-test-container")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer os.RemoveAll(tmpDir)
+	writeFile(path.Join(tmpDir, "touch-me"), "", t)
+
+	// Test reading from a read-only bind mount
+	cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "touch-me") {
+		t.Fatal("Container failed to read from bind mount")
+	}
+
+	// test writing to bind mount
+	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla")
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
+
+	// test mounting to an illegal destination directory
+	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".")
+	_, err = runCommand(cmd)
+	if err == nil {
+		t.Fatal("Container bind mounted illegal directory")
+	}
+
+	// test mount a file
+	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla")
+	_, err = runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
+	expected := "yotta"
+	if content != expected {
+		t.Fatalf("Output should be %q, actual out: %q", expected, content)
+	}
+
+	logDone("run - bind mounts")
+}
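+// NOTE: the -v flag exercised above takes host-path:container-path with an
+// optional mode suffix; the forms these tests assume are:
+//
+//	-v /host/dir:/ctr/dir      # read-write (the default)
+//	-v /host/dir:/ctr/dir:ro   # read-only
+//	-v /host/dir:/ctr/dir:rw   # explicit read-write
+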
+func TestRunMutableNetworkFiles(t *testing.T) {
+	defer deleteAllContainers()
+
+	for _, fn := range []string{"resolv.conf", "hosts"} {
+		deleteAllContainers()
+
+		out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s; while true; do sleep 1; done", fn)))
+		if err != nil {
+			t.Fatal(err, out)
+		}
+
+		time.Sleep(1 * time.Second)
+
+		contID := strings.TrimSpace(out)
+
+		f, err := os.Open(filepath.Join("/var/lib/docker/containers", contID, fn))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		content, err := ioutil.ReadAll(f)
+		f.Close()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if strings.TrimSpace(string(content)) != "success" {
+			t.Fatal("Content was not what was modified in the container", string(content))
+		}
+
+		out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "sh", "-c", fmt.Sprintf("while true; do cat /etc/%s; sleep 1; done", fn)))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		contID = strings.TrimSpace(out)
+
+		resolvConfPath := filepath.Join("/var/lib/docker/containers", contID, fn)
+
+		f, err = os.OpenFile(resolvConfPath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if _, err := f.Seek(0, 0); err != nil {
+			f.Close()
+			t.Fatal(err)
+		}
+
+		if err := f.Truncate(0); err != nil {
+			f.Close()
+			t.Fatal(err)
+		}
+
+		if _, err := f.Write([]byte("success2\n")); err != nil {
+			f.Close()
+			t.Fatal(err)
+		}
+
+		f.Close()
+
+		time.Sleep(2 * time.Second) // don't race the 1-second cat loop in the container
+
+		out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "logs", "c2"))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		lines := strings.Split(out, "\n")
+		if strings.TrimSpace(lines[len(lines)-2]) != "success2" {
+			t.Fatalf("Did not find the correct output in /etc/%s: %s %#v", fn, out, lines)
+		}
+	}
+}
+
+// Ensure that CIDFile gets deleted if it's empty
+// Perform this test by making `docker run` fail
+func TestRunCidFileCleanupIfEmpty(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	tmpCidFile := path.Join(tmpDir, "cid")
+	cmd := exec.Command(dockerBinary, "run", "--cidfile", tmpCidFile, "scratch")
+	out, _, err := runCommandWithOutput(cmd)
+	t.Log(out)
+	if err == nil {
+		t.Fatal("Run without command must fail")
+	}
+
+	if _, err := os.Stat(tmpCidFile); err == nil {
+		t.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
+	}
+	deleteAllContainers()
+	logDone("run - cleanup empty cidfile on fail")
+}
+
+// #2098 - Docker cidFiles only contain short version of the containerId
+// sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+// TestRunCidFileCheckIDLength tests that run --cidfile returns the long id
+func TestRunCidFileCheckIDLength(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpCidFile := path.Join(tmpDir, "cid")
+	defer os.RemoveAll(tmpDir)
+	cmd := exec.Command(dockerBinary, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id := strings.TrimSpace(out)
+	buffer, err := ioutil.ReadFile(tmpCidFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cid := string(buffer)
+	if len(cid) != 64 {
+		t.Fatalf("--cidfile should be a long id, not %q", cid)
+	}
+	if cid != id {
+		t.Fatalf("cid must be equal to %s, got %s", id, cid)
+	}
+	deleteAllContainers()
+	logDone("run - cidfile contains long id")
+}
+
+func TestRunNetworkNotInitializedNoneMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-d", "--net=none", "busybox", "top")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id := strings.TrimSpace(out)
+	res, err := inspectField(id, "NetworkSettings.IPAddress")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res != "" {
+		t.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
+	}
+	deleteAllContainers()
+	logDone("run - network must not be initialized in 'none' mode")
+}
+
+func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id := strings.TrimSpace(out)
+	ip, err := inspectField(id, "NetworkSettings.IPAddress")
+	if err != nil {
+		t.Fatal(err)
+	}
+	iptCmd := exec.Command("iptables", "-D", "FORWARD", "-d", fmt.Sprintf("%s/32", ip),
+		"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
+	out, _, err = runCommandWithOutput(iptCmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if err := deleteContainer(id); err != nil {
+		t.Fatal(err)
+	}
+	cmd = exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top")
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	deleteAllContainers()
+	logDone("run - port should be deallocated even on iptables error")
+}
+
+func TestRunPortInUse(t *testing.T) {
+	port := "1234"
+	l, err := net.Listen("tcp", ":"+port)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer l.Close()
+	cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal("Binding on a port that is already in use must fail")
+	}
+	if !strings.Contains(out, "address already in use") {
+		t.Fatalf("Output should contain \"address already in use\", got %s", out)
+	}
+
+	deleteAllContainers()
+	logDone("run - fail if port already in use")
+}
+
+// https://github.com/docker/docker/issues/8428
+func TestRunPortProxy(t *testing.T) {
+	defer deleteAllContainers()
+
+	port := "12345"
+	cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err)
+	}
+
+	// Connect 10 times. This triggers 10 EPIPEs in the child process and
+	// kills it if it writes to a closed stdout/stderr.
+	for i := 0; i < 10; i++ {
+		net.Dial("tcp", fmt.Sprintf("0.0.0.0:%s", port))
+	}
+
+	listPs := exec.Command("sh", "-c", "ps ax | grep docker")
+	out, _, err = runCommandWithOutput(listPs)
+	if err != nil {
+		t.Errorf("list docker process failed with output %s, error %s", out, err)
+	}
+	if strings.Contains(out, "<defunct>") {
+		t.Errorf("Unexpected defunct docker process")
+	}
+	if !strings.Contains(out, "docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 12345") {
+		t.Errorf("Failed to find docker-proxy process, got %s", out)
+	}
+
+	logDone("run - proxy should work with unavailable port")
+}
+
+// Regression test for #7792
+func TestRunMountOrdering(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir2)
+
+	// Create a subdirectory plus marker files for the nested binds below.
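+	// NOTE: the regression being covered is ordering: nested binds must be
+	// applied parent-first (/tmp before /tmp/foo, /tmp/tmp2 before
+	// /tmp/tmp2/foo) no matter the order the -v flags are given, so every
+	// touch-me marker created below should stay visible in the container.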
+ fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + t.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + + deleteAllContainers() + logDone("run - volumes are mounted in the correct order") +} + +func TestRunExecDir(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + execDir := filepath.Join(execDriverPath, id) + stateFile := filepath.Join(execDir, "state.json") + contFile := filepath.Join(execDir, "container.json") + + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + t.Fatal(err) + } + fi, err = os.Stat(contFile) + if err != nil { + t.Fatal(err) + } + } + + stopCmd := exec.Command(dockerBinary, "stop", id) + out, _, err = runCommandWithOutput(stopCmd) + if err != nil { + t.Fatal(err, out) + } + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err == nil { + t.Fatalf("Statefile %q is exists for stopped container!", stateFile) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + fi, err = os.Stat(contFile) + if err == nil { + t.Fatalf("Container file %q is exists for stopped container!", contFile) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + } + startCmd := exec.Command(dockerBinary, "start", id) + out, _, err = runCommandWithOutput(startCmd) + if err != nil { + t.Fatal(err, out) + } + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + t.Fatal(err) + } + fi, err = os.Stat(contFile) + if err != nil { + t.Fatal(err) + } + } + rmCmd := exec.Command(dockerBinary, "rm", "-f", id) + out, _, err = runCommandWithOutput(rmCmd) + if err != nil { + t.Fatal(err, out) + } + { + _, err := os.Stat(execDir) + if err == nil { + t.Fatal(err) + } + if err == nil { + t.Fatalf("Exec directory %q is exists for removed container!", execDir) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + } + + logDone("run - check execdriver dir behavior") +} + +// #6509 +func TestRunRedirectStdout(t *testing.T) { + + defer deleteAllContainers() + + checkRedirect := func(command string) { + _, tty, err := pty.Open() + if err != nil { + t.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command("sh", 
"-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + ch := make(chan struct{}) + if err := cmd.Start(); err != nil { + t.Fatalf("start err: %v", err) + } + go func() { + if err := cmd.Wait(); err != nil { + t.Fatalf("wait err=%v", err) + } + close(ch) + }() + + select { + case <-time.After(time.Second): + t.Fatal("command timeout") + case <-ch: + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") + + logDone("run - redirect stdout") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func TestRunReuseBindVolumeThatIsSymlink(t *testing.T) { + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + deleteAllContainers() + logDone("run - can remount old bindmount volume") +} + +func TestVolumesNoCopyData(t *testing.T) { + defer deleteImages("dataimage") + defer deleteAllContainers() + if _, err := buildImage("dataimage", + `FROM busybox + RUN mkdir -p /foo + RUN touch /foo/bar`, + true); err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "--name", "test", "-v", "/foo", "busybox") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar") + if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { + t.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir, err := ioutil.TempDir("", "docker_test_bind_mount_copy_data") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + + cmd = exec.Command(dockerBinary, "run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar") + if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { + t.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } + + logDone("run - volumes do not copy data for volumes-from and bindmounts") +} + +func TestRunVolumesNotRecreatedOnStart(t *testing.T) { + // Clear out any remnants from other tests + deleteAllContainers() + info, err := ioutil.ReadDir(volumesConfigPath) + if err != nil { + t.Fatal(err) + } + if len(info) > 0 { + for _, f := range info { + if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil { + t.Fatal(err) + } + } + } + + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "start", "lone_starr") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + info, err = 
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(info) != 1 {
+		t.Fatalf("Expected only 1 volume, got %d", len(info))
+	}
+
+	logDone("run - volumes not recreated on start")
+}
+
+func TestRunNoOutputFromPullInStdout(t *testing.T) {
+	defer deleteAllContainers()
+	// just run with unknown image
+	cmd := exec.Command(dockerBinary, "run", "asdfsg")
+	stdout := bytes.NewBuffer(nil)
+	cmd.Stdout = stdout
+	if err := cmd.Run(); err == nil {
+		t.Fatal("Run with unknown image should fail")
+	}
+	if stdout.Len() != 0 {
+		t.Fatalf("Stdout contains output from pull: %s", stdout)
+	}
+	logDone("run - no output from pull in stdout")
+}
+
+func TestRunVolumesCleanPaths(t *testing.T) {
+	defer deleteAllContainers()
+
+	if _, err := buildImage("run_volumes_clean_paths",
+		`FROM busybox
+		VOLUME /foo/`,
+		true); err != nil {
+		t.Fatal(err)
+	}
+	defer deleteImages("run_volumes_clean_paths")
+
+	cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
+		t.Fatal(err, out)
+	}
+
+	out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if out != "" {
+		t.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out)
+	}
+
+	out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !strings.Contains(out, volumesStoragePath) {
+		t.Fatalf("Volume was not defined for /foo\n%q", out)
+	}
+
+	out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if out != "" {
+		t.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out)
+	}
+	out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !strings.Contains(out, volumesStoragePath) {
+		t.Fatalf("Volume was not defined for /bar\n%q", out)
+	}
+
+	logDone("run - volume paths are cleaned")
+}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
new file mode 100644
index 00000000..f15e6b51
--- /dev/null
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -0,0 +1,395 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"testing"
+)
+
+// save a repo and try to load it using stdout
+func TestSaveAndLoadRepoStdout(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err))
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	repoName := "foobar-save-load-test"
+
+	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+	out, _, err = runCommandWithOutput(inspectCmd)
+	errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err))
+
+	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	out, _, err = runCommandWithOutput(commitCmd)
+	errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err))
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	before, _, err := runCommandWithOutput(inspectCmd)
+	errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err))
+
+	saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar`
+	saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err))
+
+	deleteImages(repoName)
+
+	loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load`
+	loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+	out, _, err = runCommandWithOutput(loadCmd)
+	errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err))
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	after, _, err := runCommandWithOutput(inspectCmd)
+	errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err))
+
+	if before != after {
+		t.Fatalf("inspect is not the same after a save / load")
+	}
+
+	deleteContainer(cleanedContainerID)
+	deleteImages(repoName)
+
+	os.Remove("/tmp/foobar-save-load-test.tar")
+
+	logDone("save - save a repo using stdout")
+	logDone("load - load a repo using stdout")
+}
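+// NOTE: the save/load round-trips in this file are driven through bash so the
+// shell handles redirection and piping; the general shape being exercised is:
+//
+//	docker save <repo> > repo.tar
+//	docker load < repo.tar        # or: cat repo.tar | docker load
+//
+// The compressed variants below splice compressors into that pipeline and
+// expect `docker load` to reject streams it cannot read back.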
+
+// save a repo using xz compression and try to load it using stdout
+func TestSaveXzAndLoadRepoStdout(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatalf("failed to create a container: %v %v", out, err)
+	}
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	repoName := "foobar-save-load-test-xz-gz"
+
+	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+	out, _, err = runCommandWithOutput(inspectCmd)
+	if err != nil {
+		t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
+	}
+
+	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	out, _, err = runCommandWithOutput(commitCmd)
+	if err != nil {
+		t.Fatalf("failed to commit container: %v %v", out, err)
+	}
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	before, _, err := runCommandWithOutput(inspectCmd)
+	if err != nil {
+		t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+	}
+
+	saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s`
+	saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	if err != nil {
+		t.Fatalf("failed to save repo: %v %v", out, err)
+	}
+
+	deleteImages(repoName)
+
+	loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
+	loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+	out, _, err = runCommandWithOutput(loadCmd)
+	if err == nil {
+		t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+	}
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	after, _, err := runCommandWithOutput(inspectCmd)
+	if err == nil {
+		t.Fatalf("the repo should not exist: %v", after)
+	}
+
+	deleteContainer(cleanedContainerID)
+	deleteImages(repoName)
+
+	logDone("load - save a repo with xz compression & load it using stdout")
+}
+
+// save a repo using xz+gz compression and try to load it using stdout
+func TestSaveXzGzAndLoadRepoStdout(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
+
+	runCmd :=
exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("failed to create a container: %v %v", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + + repoName := "foobar-save-load-test-xz-gz" + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) + } + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) + out, _, err = runCommandWithOutput(commitCmd) + if err != nil { + t.Fatalf("failed to commit container: %v %v", out, err) + } + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + before, _, err := runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("the repo should exist before saving it: %v %v", before, err) + } + + saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s` + saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + if err != nil { + t.Fatalf("failed to save repo: %v %v", out, err) + } + + deleteImages(repoName) + + loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath) + loadCmd := exec.Command("bash", "-c", loadCmdFinal) + out, _, err = runCommandWithOutput(loadCmd) + if err == nil { + t.Fatalf("expected error, but succeeded with no error and output: %v", out) + } + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + after, _, err := runCommandWithOutput(inspectCmd) + if err == nil { + t.Fatalf("the repo should not exist: %v", after) + } + + deleteContainer(cleanedContainerID) + deleteImages(repoName) + + logDone("load - save a repo with xz+gz compression & load it using stdout") +} + +func TestSaveSingleTag(t *testing.T) { + repoName := "foobar-save-single-tag-test" + + tagCmdFinal := fmt.Sprintf("%v tag busybox:latest %v:latest", dockerBinary, repoName) + tagCmd := exec.Command("bash", "-c", tagCmdFinal) + out, _, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + + idCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) + idCmd := exec.Command("bash", "-c", idCmdFinal) + out, _, err = runCommandWithOutput(idCmd) + errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + saveCmdFinal := fmt.Sprintf("%v save %v:latest | tar t | grep -E '(^repositories$|%v)'", dockerBinary, repoName, cleanedImageID) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save repo with image ID and 'repositories' file: %v %v", out, err)) + + deleteImages(repoName) + + logDone("save - save a specific image:tag") +} + +func TestSaveImageId(t *testing.T) { + repoName := "foobar-save-image-id-test" + + tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v:latest", dockerBinary, repoName) + tagCmd := exec.Command("bash", "-c", tagCmdFinal) + out, _, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + + idLongCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) + idLongCmd := exec.Command("bash", "-c", idLongCmdFinal) + out, _, err = runCommandWithOutput(idLongCmd) + errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v 
%v", out, err))
+
+	cleanedLongImageID := stripTrailingCharacters(out)
+
+	idShortCmdFinal := fmt.Sprintf("%v images -q %v", dockerBinary, repoName)
+	idShortCmd := exec.Command("bash", "-c", idShortCmdFinal)
+	out, _, err = runCommandWithOutput(idShortCmd)
+	errorOut(err, t, fmt.Sprintf("failed to get repo short ID: %v %v", out, err))
+
+	cleanedShortImageID := stripTrailingCharacters(out)
+
+	saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep %v", dockerBinary, cleanedShortImageID, cleanedLongImageID)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	errorOut(err, t, fmt.Sprintf("failed to save repo with image ID: %v %v", out, err))
+
+	deleteImages(repoName)
+
+	logDone("save - save an image by ID")
+}
+
+// save a repo and try to load it using flags
+func TestSaveAndLoadRepoFlags(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err))
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	repoName := "foobar-save-load-test"
+
+	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+	out, _, err = runCommandWithOutput(inspectCmd)
+	errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err))
+
+	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	out, _, err = runCommandWithOutput(commitCmd)
+	errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err))
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	before, _, err := runCommandWithOutput(inspectCmd)
+	errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err))
+
+	saveCmdTemplate := `%v save -o /tmp/foobar-save-load-test.tar %v`
+	saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err))
+
+	deleteImages(repoName)
+
+	loadCmdFinal := `docker load -i /tmp/foobar-save-load-test.tar`
+	loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+	out, _, err = runCommandWithOutput(loadCmd)
+	errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err))
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	after, _, err := runCommandWithOutput(inspectCmd)
+	errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err))
+
+	if before != after {
+		t.Fatalf("inspect is not the same after a save / load")
+	}
+
+	deleteContainer(cleanedContainerID)
+	deleteImages(repoName)
+
+	os.Remove("/tmp/foobar-save-load-test.tar")
+
+	logDone("save - save a repo using -o")
+	logDone("load - load a repo using -i")
+}
+
+func TestSaveMultipleNames(t *testing.T) {
+	repoName := "foobar-save-multi-name-test"
+
+	// Make the first image
+	tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v-one:latest", dockerBinary, repoName)
+	tagCmd := exec.Command("bash", "-c", tagCmdFinal)
+	out, _, err := runCommandWithOutput(tagCmd)
+	errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err))
+	// Make the second image
+	tagCmdFinal = fmt.Sprintf("%v tag scratch:latest %v-two:latest", dockerBinary, repoName)
+	tagCmd = exec.Command("bash", "-c", tagCmdFinal)
+	out, _, err = runCommandWithOutput(tagCmd)
+	errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err))
+
+	saveCmdFinal := fmt.Sprintf("%v save %v-one %v-two:latest | tar xO repositories | grep -q -E '(-one|-two)'", dockerBinary, repoName, repoName)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	errorOut(err, t, fmt.Sprintf("failed to save multiple repos: %v %v", out, err))
+
+	deleteImages(repoName)
+
+	logDone("save - save by multiple names")
+}
+
+// Issue #6722 #5892 ensure directories are included in changes
+func TestSaveDirectoryPermissions(t *testing.T) {
+	layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
+	layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
+
+	name := "save-directory-permissions"
+	tmpDir, err := ioutil.TempDir("", "save-layers-with-directories")
+	if err != nil {
+		t.Fatalf("failed to create temporary directory: %s", err)
+	}
+	extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir")
+	if err := os.Mkdir(extractionDirectory, 0777); err != nil {
+		t.Fatalf("failed to create the extraction directory: %s", err)
+	}
+	defer os.RemoveAll(tmpDir)
+	defer deleteImages(name)
+	_, err = buildImage(name,
+		`FROM busybox
+	RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a
+	RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	saveCmdFinal := fmt.Sprintf("%s save %s | tar -xf - -C %s", dockerBinary, name, extractionDirectory)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err := runCommandWithOutput(saveCmd)
+	if err != nil {
+		t.Errorf("failed to save and extract image: %s", out)
+	}
+
+	dirs, err := ioutil.ReadDir(extractionDirectory)
+	if err != nil {
+		t.Errorf("failed to get a listing of the layer directories: %s", err)
+	}
+
+	found := false
+	for _, entry := range dirs {
+		if entry.IsDir() {
+			layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar")
+
+			f, err := os.Open(layerPath)
+			if err != nil {
+				t.Fatalf("failed to open %s: %s", layerPath, err)
+			}
+
+			entries, err := ListTar(f)
+			if err != nil {
+				t.Fatalf("encountered error while listing tar entries: %s", err)
+			}
+
+			if reflect.DeepEqual(entries, layerEntries) || reflect.DeepEqual(entries, layerEntriesAUFS) {
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		t.Fatalf("failed to find the layer with the right content listing")
+	}
+
+	logDone("save - ensure directories exist in exported layers")
+}
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
new file mode 100644
index 00000000..e8b9efdc
--- /dev/null
+++ b/integration-cli/docker_cli_search_test.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+	"fmt"
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+// search for repos named "busybox" on the central registry
+func TestSearchOnCentralRegistry(t *testing.T) {
+	searchCmd := exec.Command(dockerBinary, "search", "busybox")
+	out, exitCode, err := runCommandWithOutput(searchCmd)
+	errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to search on the central registry")
+	}
+
+	if !strings.Contains(out, "Busybox base image.") {
+		t.Fatal("couldn't find any repository whose description contains 'Busybox base image.'")
+	}
+
+	logDone("search - search for repositories named (or containing) 'Busybox base image.'")
+}
diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go
new file mode 100644
index 00000000..addc781c
--- /dev/null
+++ b/integration-cli/docker_cli_start_test.go
@@ -0,0 +1,67 @@
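+// NOTE: the two tests in this file cover `docker start` edge cases: that
+// `start -a` on a container that cannot start returns promptly with an
+// error, and that a start that failed because of a missing --volumes-from
+// source does not break later starts of the same container.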
+package main
+
+import (
+	"os/exec"
+	"strings"
+	"testing"
+	"time"
+)
+
+// Regression test for https://github.com/docker/docker/issues/7843
+func TestStartAttachReturnsOnError(t *testing.T) {
+	defer deleteAllContainers()
+
+	cmd(t, "run", "-d", "--name", "test", "busybox")
+	cmd(t, "stop", "test")
+
+	// Expect this to fail because the above container is stopped, this is what we want
+	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil {
+		t.Fatal("Expected error but got none")
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		// Attempt to start attached to the container that won't start.
+		// This should return an error immediately since the container
+		// can't be started (report with t.Error, not t.Fatal, since this
+		// runs in a separate goroutine).
+		if _, err := runCommand(exec.Command(dockerBinary, "start", "-a", "test2")); err == nil {
+			t.Error("Expected error but got none")
+		}
+		close(ch)
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(time.Second):
+		t.Fatalf("Attach did not exit properly")
+	}
+
+	logDone("start - error on start with attach exits")
+}
+
+// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s
+func TestStartVolumesFromFailsCleanly(t *testing.T) {
+	defer deleteAllContainers()
+
+	// Create the first data volume
+	cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
+
+	// Expect this to fail because the data_after container doesn't exist yet
+	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
+		t.Fatal("Expected error but got none")
+	}
+
+	// Create the second data volume
+	cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
+
+	// Now, all the volumes should be there
+	cmd(t, "start", "consumer")
+
+	// Check that we have the volumes we want
+	out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
+	nVolumes := strings.Trim(out, " \r\n'")
+	if nVolumes != "2" {
+		t.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
+	}
+
+	logDone("start - missing containers in --volumes-from did not affect subsequent runs")
+}
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
new file mode 100644
index 00000000..815416f2
--- /dev/null
+++ b/integration-cli/docker_cli_tag_test.go
@@ -0,0 +1,90 @@
+package main
+
+import (
+	"fmt"
+	"os/exec"
+	"testing"
+)
+
+// tagging a named image in a new unprefixed repo should work
+func TestTagUnprefixedRepoByName(t *testing.T) {
+	if err := pullImageIfNotExist("busybox:latest"); err != nil {
+		t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+	}
+
+	tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz")
+	out, _, err := runCommandWithOutput(tagCmd)
+	errorOut(err, t, fmt.Sprintf("%v %v", out, err))
+
+	deleteImages("testfoobarbaz")
+
+	logDone("tag - busybox -> testfoobarbaz")
+}
+
+// tagging an image by ID in a new unprefixed repo should work
+func TestTagUnprefixedRepoByID(t *testing.T) {
+	getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox")
+	out, _, err := runCommandWithOutput(getIDCmd)
+	errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err))
+
+	cleanedImageID := stripTrailingCharacters(out)
+	tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz")
+	out, _, err = runCommandWithOutput(tagCmd)
+	errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+ deleteImages("testfoobarbaz")
+
+ logDone("tag - busybox's image ID -> testfoobarbaz")
+}
+
+// ensure we don't allow the use of invalid repository names; these tag operations should fail
+func TestTagInvalidUnprefixedRepo(t *testing.T) {
+ invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"}
+
+ for _, repo := range invalidRepos {
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo)
+ _, _, err := runCommandWithOutput(tagCmd)
+ if err == nil {
+ t.Fatalf("tag busybox %v should have failed", repo)
+ }
+ }
+ logDone("tag - busybox invalid repo names --> must fail")
+}
+
+// ensure we don't allow the use of invalid tags; these tag operations should fail
+func TestTagInvalidPrefixedRepo(t *testing.T) {
+ longTag := makeRandomString(121)
+
+ invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
+
+ for _, repotag := range invalidTags {
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag)
+ _, _, err := runCommandWithOutput(tagCmd)
+ if err == nil {
+ t.Fatalf("tag busybox %v should have failed", repotag)
+ }
+ }
+ logDone("tag - busybox with invalid repo:tagnames --> must fail")
+}
+
+// ensure we allow the use of valid tags
+func TestTagValidPrefixedRepo(t *testing.T) {
+ if err := pullImageIfNotExist("busybox:latest"); err != nil {
+ t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+ }
+
+ validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t"}
+
+ for _, repo := range validRepos {
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", repo)
+ _, _, err := runCommandWithOutput(tagCmd)
+ if err != nil {
+ t.Errorf("tag busybox %v should have worked: %s", repo, err)
+ continue
+ }
+ deleteImages(repo)
+ logMessage := fmt.Sprintf("tag - busybox %v", repo)
+ logDone(logMessage)
+ }
+}
diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go
new file mode 100644
index 00000000..f3ff15bc
--- /dev/null
+++ b/integration-cli/docker_cli_top_test.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestTopMultipleArgs(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+ defer deleteContainer(cleanedContainerID)
+
+ topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid")
+ out, _, err = runCommandWithOutput(topCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+ if !strings.Contains(out, "PID") {
+ t.Fatalf("did not see PID after top -o pid")
+ }
+
+ logDone("top - multiple arguments")
+}
+
+func TestTopNonPrivileged(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
+ out, _, err = runCommandWithOutput(topCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+ topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
+ out2, _, err2 := runCommandWithOutput(topCmd)
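+ // both outputs are checked below, so one flaky ps read doesn't decide the test on its own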
+ errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2))
+
+ killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+ _, err = runCommand(killCmd)
+ errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err))
+
+ deleteContainer(cleanedContainerID)
+
+ if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") {
+ t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
+ } else if !strings.Contains(out, "sleep 20") {
+ t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
+ } else if !strings.Contains(out2, "sleep 20") {
+ t.Fatal("top should've listed `sleep 20` in the process list, but failed the second time")
+ }
+
+ logDone("top - sleep process should be listed in non-privileged mode")
+}
+
+func TestTopPrivileged(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "sleep", "20")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
+ out, _, err = runCommandWithOutput(topCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+ topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
+ out2, _, err2 := runCommandWithOutput(topCmd)
+ errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2))
+
+ killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+ _, err = runCommand(killCmd)
+ errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err))
+
+ deleteContainer(cleanedContainerID)
+
+ if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") {
+ t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
+ } else if !strings.Contains(out, "sleep 20") {
+ t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
+ } else if !strings.Contains(out2, "sleep 20") {
+ t.Fatal("top should've listed `sleep 20` in the process list, but failed the second time")
+ }
+
+ logDone("top - sleep process should be listed in privileged mode")
+}
diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go
new file mode 100644
index 00000000..7f1838e5
--- /dev/null
+++ b/integration-cli/docker_cli_version_test.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// ensure docker version works
+func TestVersionEnsureSucceeds(t *testing.T) {
+ versionCmd := exec.Command(dockerBinary, "version")
+ out, exitCode, err := runCommandWithOutput(versionCmd)
+ errorOut(err, t, fmt.Sprintf("encountered error while running docker version: %v", err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to execute docker version")
+ }
+
+ stringsToCheck := []string{
+ "Client version:",
+ "Client API version:",
+ "Go version (client):",
+ "Git commit (client):",
+ "Server version:",
+ "Server API version:",
+ "Go version (server):",
+ "Git commit (server):",
+ }
+
+ for _, linePrefix := range stringsToCheck {
+ if !strings.Contains(out, linePrefix) {
+ t.Errorf("couldn't find string %v in output", linePrefix)
+ }
+ }
+
+ logDone("version - verify that it works and that the output is properly formatted")
+}
diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go
new file mode 100644
index 00000000..23903a39
--- /dev/null
+++ b/integration-cli/docker_test_vars.go
@@ -0,0 +1,47 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+)
+
+var (
+ // the docker binary to use
+ dockerBinary = "docker"
+
+ // the private registry image to use for tests involving the registry
+ registryImageName = "registry"
+
+ // the private registry to use for tests
+ privateRegistryURL = "127.0.0.1:5000"
+
+ dockerBasePath = "/var/lib/docker"
+ execDriverPath = dockerBasePath + "/execdriver/native"
+ volumesConfigPath = dockerBasePath + "/volumes"
+ volumesStoragePath = dockerBasePath + "/vfs/dir"
+
+ workingDirectory string
+)
+
+func init() {
+ if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
+ dockerBinary = dockerBin
+ } else {
+ whichCmd := exec.Command("which", "docker")
+ out, _, err := runCommandWithOutput(whichCmd)
+ if err == nil {
+ dockerBinary = stripTrailingCharacters(out)
+ } else {
+ fmt.Println("ERROR: couldn't resolve full path to the Docker binary")
+ os.Exit(1)
+ }
+ }
+ if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
+ registryImageName = registryImage
+ }
+ if registry := os.Getenv("REGISTRY_URL"); registry != "" {
+ privateRegistryURL = registry
+ }
+ workingDirectory, _ = os.Getwd()
+}
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
new file mode 100644
index 00000000..3bdf36ec
--- /dev/null
+++ b/integration-cli/docker_utils.go
@@ -0,0 +1,714 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/http/httputil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+// Daemon represents a Docker daemon for the testing framework.
+type Daemon struct {
+ t *testing.T
+ logFile *os.File
+ folder string
+ stdin io.WriteCloser
+ stdout, stderr io.ReadCloser
+ cmd *exec.Cmd
+ storageDriver string
+ execDriver string
+ wait chan error
+}
+
+// NewDaemon returns a Daemon instance to be used for testing.
+// This will create a directory such as daemon123456789 in the folder specified by $DEST.
+// The daemon will not automatically start.
+func NewDaemon(t *testing.T) *Daemon {
+ dest := os.Getenv("DEST")
+ if dest == "" {
+ t.Fatal("Please set the DEST environment variable")
+ }
+
+ dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().Unix()))
+ daemonFolder, err := filepath.Abs(dir)
+ if err != nil {
+ t.Fatalf("Could not make '%s' an absolute path: %v", dir, err)
+ }
+
+ // 0700 rather than 0600: a directory needs the execute bit to be usable
+ if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0700); err != nil {
+ t.Fatalf("Could not create %s/graph directory", daemonFolder)
+ }
+
+ return &Daemon{
+ t: t,
+ folder: daemonFolder,
+ storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
+ execDriver: os.Getenv("DOCKER_EXECDRIVER"),
+ }
+}
+
+// Start will start the daemon and return once it is ready to receive requests.
+// You can specify additional daemon flags.
+func (d *Daemon) Start(arg ...string) error {
+ dockerBinary, err := exec.LookPath(dockerBinary)
+ if err != nil {
+ d.t.Fatalf("could not find docker binary in $PATH: %v", err)
+ }
+
+ args := []string{
+ "--host", d.sock(),
+ "--daemon", "--debug",
+ "--graph", fmt.Sprintf("%s/graph", d.folder),
+ "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder),
+ }
+ if d.storageDriver != "" {
+ args = append(args, "--storage-driver", d.storageDriver)
+ }
+ if d.execDriver != "" {
+ args = append(args, "--exec-driver", d.execDriver)
+ }
+
+ args = append(args, arg...)
+ d.cmd = exec.Command(dockerBinary, args...)
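+ // capture the daemon's combined output in a per-daemon log file so failed runs can be inspected afterwards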
+
+ d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
+ if err != nil {
+ d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err)
+ }
+
+ d.cmd.Stdout = d.logFile
+ d.cmd.Stderr = d.logFile
+
+ if err := d.cmd.Start(); err != nil {
+ return fmt.Errorf("could not start daemon container: %v", err)
+ }
+
+ wait := make(chan error)
+
+ go func() {
+ wait <- d.cmd.Wait()
+ d.t.Log("exiting daemon")
+ close(wait)
+ }()
+
+ d.wait = wait
+
+ tick := time.Tick(500 * time.Millisecond)
+ // the deadline is created outside the loop; otherwise it would be reset
+ // on every tick and could never fire
+ timeout := time.After(2 * time.Second)
+ // make sure daemon is ready to receive requests
+ for {
+ d.t.Log("waiting for daemon to start")
+ select {
+ case <-timeout:
+ return errors.New("timeout: daemon does not respond")
+ case <-tick:
+ c, err := net.Dial("unix", filepath.Join(d.folder, "docker.sock"))
+ if err != nil {
+ continue
+ }
+
+ client := httputil.NewClientConn(c, nil)
+ defer client.Close()
+
+ req, err := http.NewRequest("GET", "/_ping", nil)
+ if err != nil {
+ d.t.Fatalf("could not create new request: %v", err)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ continue
+ }
+ if resp.StatusCode != http.StatusOK {
+ d.t.Logf("received status != 200 OK: %s", resp.Status)
+ }
+
+ d.t.Log("daemon started")
+ return nil
+ }
+ }
+}
+
+// StartWithBusybox will first start the daemon with Daemon.Start()
+// then save the busybox image from the main daemon and load it into this Daemon instance.
+func (d *Daemon) StartWithBusybox(arg ...string) error {
+ if err := d.Start(arg...); err != nil {
+ return err
+ }
+ bb := filepath.Join(d.folder, "busybox.tar")
+ if _, err := os.Stat(bb); err != nil {
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("unexpected error on busybox.tar stat: %v", err)
+ }
+ // saving busybox image from main daemon
+ if err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").Run(); err != nil {
+ return fmt.Errorf("could not save busybox image: %v", err)
+ }
+ }
+ // loading busybox image to this daemon
+ if _, err := d.Cmd("load", "--input", bb); err != nil {
+ return fmt.Errorf("could not load busybox image: %v", err)
+ }
+ if err := os.Remove(bb); err != nil {
+ d.t.Logf("Could not remove %s: %v", bb, err)
+ }
+ return nil
+}
+
+// Stop will send a SIGINT every second and wait for the daemon to stop.
+// If it times out, a SIGKILL is sent.
+// Stop will not delete the daemon directory. If a purged daemon is needed,
+// instantiate a new one with NewDaemon.
+func (d *Daemon) Stop() error {
+ if d.cmd == nil || d.wait == nil {
+ return errors.New("daemon not started")
+ }
+
+ defer func() {
+ d.logFile.Close()
+ d.cmd = nil
+ }()
+
+ i := 1
+ tick := time.Tick(time.Second)
+ // as in Start(), the deadline lives outside the loop so that it can fire
+ timeout := time.After(20 * time.Second)
+
+ if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+ return fmt.Errorf("could not send signal: %v", err)
+ }
+out:
+ for {
+ select {
+ case err := <-d.wait:
+ return err
+ case <-timeout:
+ d.t.Log("timeout")
+ break out
+ case <-tick:
+ d.t.Logf("Attempt #%d: daemon is still running with pid %d", i+1, d.cmd.Process.Pid)
+ if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+ return fmt.Errorf("could not send signal: %v", err)
+ }
+ i++
+ }
+ }
+
+ if err := d.cmd.Process.Kill(); err != nil {
+ d.t.Logf("Could not kill daemon: %v", err)
+ return err
+ }
+
+ return nil
+}
+
+// Restart will restart the daemon by first stopping it and then starting it.
+func (d *Daemon) Restart(arg ...string) error {
+ d.Stop()
+ return d.Start(arg...)
+}
+
+func (d *Daemon) sock() string {
+ return fmt.Sprintf("unix://%s/docker.sock", d.folder)
+}
+
+// Cmd will execute a docker CLI command against this Daemon.
+// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version
+func (d *Daemon) Cmd(name string, arg ...string) (string, error) {
+ args := []string{"--host", d.sock(), name}
+ args = append(args, arg...)
+ c := exec.Command(dockerBinary, args...)
+ b, err := c.CombinedOutput()
+ return string(b), err
+}
+
+func sockRequest(method, endpoint string) ([]byte, error) {
+ // FIX: the path to sock should not be hardcoded
+ sock := filepath.Join("/", "var", "run", "docker.sock")
+ c, err := net.DialTimeout("unix", sock, 10*time.Second)
+ if err != nil {
+ return nil, fmt.Errorf("could not dial docker sock at %s: %v", sock, err)
+ }
+
+ client := httputil.NewClientConn(c, nil)
+ defer client.Close()
+
+ req, err := http.NewRequest(method, endpoint, nil)
+ if err != nil {
+ return nil, fmt.Errorf("could not create new request: %v", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform request: %v", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("received status != 200 OK: %s", resp.Status)
+ }
+
+ return ioutil.ReadAll(resp.Body)
+}
+
+func deleteContainer(container string) error {
+ container = strings.Replace(container, "\n", " ", -1)
+ container = strings.Trim(container, " ")
+ killArgs := strings.Split(fmt.Sprintf("kill %v", container), " ")
+ killCmd := exec.Command(dockerBinary, killArgs...)
+ runCommand(killCmd)
+ rmArgs := strings.Split(fmt.Sprintf("rm -v %v", container), " ")
+ rmCmd := exec.Command(dockerBinary, rmArgs...)
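+ // "rm -v" also removes volumes that belonged to the container, so tests don't leak data between runs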
+ exitCode, err := runCommand(rmCmd)
+ // set error manually if not set
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero")
+ }
+
+ return err
+}
+
+func getAllContainers() (string, error) {
+ getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a")
+ out, exitCode, err := runCommandWithOutput(getContainersCmd)
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("failed to get a list of containers: %v", out)
+ }
+
+ return out, err
+}
+
+func deleteAllContainers() error {
+ containers, err := getAllContainers()
+ if err != nil {
+ fmt.Println(containers)
+ return err
+ }
+
+ if err = deleteContainer(containers); err != nil {
+ return err
+ }
+ return nil
+}
+
+func deleteImages(images ...string) error {
+ // pass each image as a separate argument to rmi
+ rmiCmd := exec.Command(dockerBinary, append([]string{"rmi"}, images...)...)
+ exitCode, err := runCommand(rmiCmd)
+ // set error manually if not set
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero")
+ }
+
+ return err
+}
+
+func imageExists(image string) error {
+ inspectCmd := exec.Command(dockerBinary, "inspect", image)
+ exitCode, err := runCommand(inspectCmd)
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("couldn't find image '%s'", image)
+ }
+ return err
+}
+
+func pullImageIfNotExist(image string) error {
+ if err := imageExists(image); err != nil {
+ pullCmd := exec.Command(dockerBinary, "pull", image)
+ if _, exitCode, err := runCommandWithOutput(pullCmd); err != nil || exitCode != 0 {
+ return fmt.Errorf("image '%s' wasn't found locally and it couldn't be pulled: %s", image, err)
+ }
+ }
+ return nil
+}
+
+// deprecated, use dockerCmd instead
+func cmd(t *testing.T, args ...string) (string, int, error) {
+ return dockerCmd(t, args...)
+}
+
+func dockerCmd(t *testing.T, args ...string) (string, int, error) {
+ out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
+ errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out))
+ return out, status, err
+}
+
+// execute a docker command with a timeout
+func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) {
+ out, status, err := runCommandWithOutputAndTimeout(exec.Command(dockerBinary, args...), timeout)
+ if err != nil {
+ return out, status, fmt.Errorf("'%s' failed with errors: %v : %q", strings.Join(args, " "), err, out)
+ }
+ return out, status, err
+}
+
+// execute a docker command in a directory
+func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, error) {
+ dockerCommand := exec.Command(dockerBinary, args...)
+ dockerCommand.Dir = path
+ out, status, err := runCommandWithOutput(dockerCommand)
+ if err != nil {
+ return out, status, fmt.Errorf("'%s' failed with errors: %v : %q", strings.Join(args, " "), err, out)
+ }
+ return out, status, err
+}
+
+// execute a docker command in a directory with a timeout
+func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) (string, int, error) {
+ dockerCommand := exec.Command(dockerBinary, args...)
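+ // run the command from the given directory (e.g. a build context)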
+ dockerCommand.Dir = path
+ out, status, err := runCommandWithOutputAndTimeout(dockerCommand, timeout)
+ if err != nil {
+ return out, status, fmt.Errorf("'%s' failed with errors: %v : %q", strings.Join(args, " "), err, out)
+ }
+ return out, status, err
+}
+
+func findContainerIP(t *testing.T, id string) string {
+ cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ t.Fatal(err, out)
+ }
+
+ return strings.Trim(out, " \r\n'")
+}
+
+func getContainerCount() (int, error) {
+ const containers = "Containers:"
+
+ cmd := exec.Command(dockerBinary, "info")
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ return 0, err
+ }
+
+ lines := strings.Split(out, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, containers) {
+ output := stripTrailingCharacters(line)
+ output = strings.TrimPrefix(output, containers)
+ output = strings.Trim(output, " ")
+ containerCount, err := strconv.Atoi(output)
+ if err != nil {
+ return 0, err
+ }
+ return containerCount, nil
+ }
+ }
+ return 0, fmt.Errorf("couldn't find the Container count in the output")
+}
+
+type FakeContext struct {
+ Dir string
+}
+
+func (f *FakeContext) Add(file, content string) error {
+ filepath := path.Join(f.Dir, file)
+ dirpath := path.Dir(filepath)
+ if dirpath != "." {
+ if err := os.MkdirAll(dirpath, 0755); err != nil {
+ return err
+ }
+ }
+ return ioutil.WriteFile(filepath, []byte(content), 0644)
+}
+
+func (f *FakeContext) Delete(file string) error {
+ filepath := path.Join(f.Dir, file)
+ return os.RemoveAll(filepath)
+}
+
+func (f *FakeContext) Close() error {
+ return os.RemoveAll(f.Dir)
+}
+
+func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) {
+ tmp, err := ioutil.TempDir("", "fake-context")
+ if err != nil {
+ return nil, err
+ }
+ if err := os.Chmod(tmp, 0755); err != nil {
+ return nil, err
+ }
+ ctx := &FakeContext{tmp}
+ for file, content := range files {
+ if err := ctx.Add(file, content); err != nil {
+ ctx.Close()
+ return nil, err
+ }
+ }
+ if err := ctx.Add("Dockerfile", dockerfile); err != nil {
+ ctx.Close()
+ return nil, err
+ }
+ return ctx, nil
+}
+
+type FakeStorage struct {
+ *FakeContext
+ *httptest.Server
+}
+
+func (f *FakeStorage) Close() error {
+ f.Server.Close()
+ return f.FakeContext.Close()
+}
+
+func fakeStorage(files map[string]string) (*FakeStorage, error) {
+ tmp, err := ioutil.TempDir("", "fake-storage")
+ if err != nil {
+ return nil, err
+ }
+ ctx := &FakeContext{tmp}
+ for file, content := range files {
+ if err := ctx.Add(file, content); err != nil {
+ ctx.Close()
+ return nil, err
+ }
+ }
+ handler := http.FileServer(http.Dir(ctx.Dir))
+ server := httptest.NewServer(handler)
+ return &FakeStorage{
+ FakeContext: ctx,
+ Server: server,
+ }, nil
+}
+
+func inspectField(name, field string) (string, error) {
+ format := fmt.Sprintf("{{.%s}}", field)
+ inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name)
+ out, exitCode, err := runCommandWithOutput(inspectCmd)
+ if err != nil || exitCode != 0 {
+ return "", fmt.Errorf("failed to inspect %s: %s", name, out)
+ }
+ return strings.TrimSpace(out), nil
+}
+
+func inspectFieldJSON(name, field string) (string, error) {
+ format := fmt.Sprintf("{{json .%s}}", field)
+ inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name)
+ out, exitCode, err := runCommandWithOutput(inspectCmd)
+ if err != nil || exitCode != 0 {
+ return "", fmt.Errorf("failed to inspect %s: %s", name, out)
+ }
+ return strings.TrimSpace(out), nil
+}
+
+func inspectFieldMap(name, path, field string) (string, error) {
+ format := fmt.Sprintf("{{index .%s %q}}", path, field)
+ inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name)
+ out, exitCode, err := runCommandWithOutput(inspectCmd)
+ if err != nil || exitCode != 0 {
+ return "", fmt.Errorf("failed to inspect %s: %s", name, out)
+ }
+ return strings.TrimSpace(out), nil
+}
+
+func getIDByName(name string) (string, error) {
+ return inspectField(name, "Id")
+}
+
+// getContainerState returns the container's exit code and whether it is
+// running; the exit code should be ignored while the container is running.
+func getContainerState(t *testing.T, id string) (int, bool, error) {
+ var (
+ exitStatus int
+ running bool
+ )
+ out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id)
+ if err != nil || exitCode != 0 {
+ return 0, false, fmt.Errorf("'%s' doesn't exist: %s", id, err)
+ }
+
+ out = strings.Trim(out, "\n")
+ splitOutput := strings.Split(out, " ")
+ if len(splitOutput) != 2 {
+ return 0, false, fmt.Errorf("failed to get container state: output is broken")
+ }
+ if splitOutput[0] == "true" {
+ running = true
+ }
+ if n, err := strconv.Atoi(splitOutput[1]); err == nil {
+ exitStatus = n
+ } else {
+ return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer")
+ }
+
+ return exitStatus, running, nil
+}
+
+func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, error) {
+ args := []string{"build", "-t", name}
+ if !useCache {
+ args = append(args, "--no-cache")
+ }
+ args = append(args, "-")
+ buildCmd := exec.Command(dockerBinary, args...)
+ buildCmd.Stdin = strings.NewReader(dockerfile)
+ out, exitCode, err := runCommandWithOutput(buildCmd)
+ if err != nil || exitCode != 0 {
+ return "", out, fmt.Errorf("failed to build the image: %s", out)
+ }
+ id, err := getIDByName(name)
+ if err != nil {
+ return "", out, err
+ }
+ return id, out, nil
+}
+
+func buildImage(name, dockerfile string, useCache bool) (string, error) {
+ id, _, err := buildImageWithOut(name, dockerfile, useCache)
+ return id, err
+}
+
+func buildImageFromContext(name string, ctx *FakeContext, useCache bool) (string, error) {
+ args := []string{"build", "-t", name}
+ if !useCache {
+ args = append(args, "--no-cache")
+ }
+ args = append(args, ".")
+ buildCmd := exec.Command(dockerBinary, args...)
+ buildCmd.Dir = ctx.Dir
+ out, exitCode, err := runCommandWithOutput(buildCmd)
+ if err != nil || exitCode != 0 {
+ return "", fmt.Errorf("failed to build the image: %s", out)
+ }
+ return getIDByName(name)
+}
+
+func buildImageFromPath(name, path string, useCache bool) (string, error) {
+ args := []string{"build", "-t", name}
+ if !useCache {
+ args = append(args, "--no-cache")
+ }
+ args = append(args, path)
+ buildCmd := exec.Command(dockerBinary, args...)
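+ // unlike buildImageFromContext, path is handed straight to `docker build`,
+ // so it can be a local directory or anything else the CLI accepts as a context (e.g. a URL)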
+ out, exitCode, err := runCommandWithOutput(buildCmd)
+ if err != nil || exitCode != 0 {
+ return "", fmt.Errorf("failed to build the image: %s", out)
+ }
+ return getIDByName(name)
+}
+
+type FakeGIT struct {
+ *httptest.Server
+ Root string
+ RepoURL string
+}
+
+func (g *FakeGIT) Close() {
+ g.Server.Close()
+ os.RemoveAll(g.Root)
+}
+
+func fakeGIT(name string, files map[string]string) (*FakeGIT, error) {
+ tmp, err := ioutil.TempDir("", "fake-git-repo")
+ if err != nil {
+ return nil, err
+ }
+ ctx := &FakeContext{tmp}
+ for file, content := range files {
+ if err := ctx.Add(file, content); err != nil {
+ ctx.Close()
+ return nil, err
+ }
+ }
+ defer ctx.Close()
+ curdir, err := os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+ defer os.Chdir(curdir)
+
+ if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output)
+ }
+ err = os.Chdir(ctx.Dir)
+ if err != nil {
+ return nil, err
+ }
+ if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output)
+ }
+ if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output)
+ }
+
+ root, err := ioutil.TempDir("", "docker-test-git-repo")
+ if err != nil {
+ return nil, err
+ }
+ repoPath := filepath.Join(root, name+".git")
+ if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil {
+ os.RemoveAll(root)
+ return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output)
+ }
+ err = os.Chdir(repoPath)
+ if err != nil {
+ os.RemoveAll(root)
+ return nil, err
+ }
+ if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil {
+ os.RemoveAll(root)
+ return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output)
+ }
+ err = os.Chdir(curdir)
+ if err != nil {
+ os.RemoveAll(root)
+ return nil, err
+ }
+ handler := http.FileServer(http.Dir(root))
+ server := httptest.NewServer(handler)
+ return &FakeGIT{
+ Server: server,
+ Root: root,
+ RepoURL: fmt.Sprintf("%s/%s.git", server.URL, name),
+ }, nil
+}
+
+// Write `content` to the file at path `dst`, creating it if necessary,
+// as well as any missing directories.
+// The file is truncated if it already exists.
+// Call t.Fatal() at the first error.
+func writeFile(dst, content string, t *testing.T) {
+ // Create subdirectories if necessary
+ if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+ t.Fatal(err)
+ }
+ f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ // Write content (truncate if it exists)
+ if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Return the contents of file at path `src`.
+// Call t.Fatal() at the first error (including if the file doesn't exist).
+func readFile(src string, t *testing.T) (content string) {
+ f, err := os.Open(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return string(data)
+}
diff --git a/integration-cli/utils.go b/integration-cli/utils.go
new file mode 100644
index 00000000..f3f128e3
--- /dev/null
+++ b/integration-cli/utils.go
@@ -0,0 +1,268 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "os/exec"
+ "reflect"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func getExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+func processExitCode(err error) (exitCode int) {
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = getExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
+
+func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
+ exitCode = 0
+ out, err := cmd.CombinedOutput()
+ exitCode = processExitCode(err)
+ output = string(out)
+ return
+}
+
+func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {
+ var (
+ stderrBuffer, stdoutBuffer bytes.Buffer
+ )
+ exitCode = 0
+ cmd.Stderr = &stderrBuffer
+ cmd.Stdout = &stdoutBuffer
+ err = cmd.Run()
+ exitCode = processExitCode(err)
+
+ stdout = stdoutBuffer.String()
+ stderr = stderrBuffer.String()
+ return
+}
+
+var ErrCmdTimeout = fmt.Errorf("command timed out")
+
+func runCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) {
+ done := make(chan error)
+ go func() {
+ output, exitCode, err = runCommandWithOutput(cmd)
+ if err != nil || exitCode != 0 {
+ done <- fmt.Errorf("failed to run command: %s", err)
+ return
+ }
+ done <- nil
+ }()
+ select {
+ case <-time.After(timeout):
+ if killErr := cmd.Process.Kill(); killErr != nil {
+ fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr)
+ }
+ err = ErrCmdTimeout
+ case <-done:
+ break
+ }
+ return
+}
+
+func runCommand(cmd *exec.Cmd) (exitCode int, err error) {
+ exitCode = 0
+ err = cmd.Run()
+ exitCode = processExitCode(err)
+ return
+}
+
+func startCommand(cmd *exec.Cmd) (exitCode int, err error) {
+ exitCode = 0
+ err = cmd.Start()
+ exitCode = processExitCode(err)
+ return
+}
+
+func logDone(message string) {
+ fmt.Printf("[PASSED]: %s\n", message)
+}
+
+func stripTrailingCharacters(target string) string {
+ target = strings.Trim(target, "\n")
+ target = strings.Trim(target, " ")
+ return target
+}
+
+func errorOut(err error, t *testing.T, message string) {
+ if err != nil {
+ t.Fatal(message)
+ }
+}
+
+// errorOutOnNonNilError fails the test when err is nil,
+// i.e. it asserts that an error did occur.
+func errorOutOnNonNilError(err error, t *testing.T, message string) {
+ if err == nil {
+ t.Fatalf(message)
+ }
+}
+
+func nLines(s string) int {
+ return strings.Count(s, "\n")
+}
+
+func unmarshalJSON(data []byte, result interface{}) error {
+ return json.Unmarshal(data, result)
+}
+
+func deepEqual(expected interface{}, result interface{}) bool {
+ return reflect.DeepEqual(result, expected)
+}
+
+func convertSliceOfStringsToMap(input []string) map[string]struct{} {
+ output := make(map[string]struct{})
+ for _, v := range input {
+ output[v] = struct{}{}
+ }
+ return output
+}
+
+// waitForContainer runs `docker run` with the given arguments and then waits
+// until the named container is reported as running.
+func waitForContainer(contID string, args ...string) error {
+ args = append([]string{"run", "--name", contID}, args...)
+ cmd := exec.Command(dockerBinary, args...)
+ if _, err := runCommand(cmd); err != nil {
+ return err
+ }
+
+ if err := waitRun(contID); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// waitRun polls `docker inspect` until .State.Running is true,
+// giving up after five seconds.
+func waitRun(contID string) error {
+ after := time.After(5 * time.Second)
+
+ for {
+ cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", contID)
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ return fmt.Errorf("error executing docker inspect: %v", err)
+ }
+
+ if strings.Contains(out, "true") {
+ break
+ }
+
+ select {
+ case <-after:
+ return fmt.Errorf("container did not come up in time")
+ default:
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ return nil
+}
+
+func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
+ var (
+ e1Entries = make(map[string]struct{})
+ e2Entries = make(map[string]struct{})
+ )
+ for _, e := range e1 {
+ e1Entries[e.Name()] = struct{}{}
+ }
+ for _, e := range e2 {
+ e2Entries[e.Name()] = struct{}{}
+ }
+ if !reflect.DeepEqual(e1Entries, e2Entries) {
+ return fmt.Errorf("entries differ")
+ }
+ return nil
+}
+
+func ListTar(f io.Reader) ([]string, error) {
+ tr := tar.NewReader(f)
+ var entries []string
+
+ for {
+ th, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ return entries, nil
+ }
+ if err != nil {
+ return entries, err
+ }
+ entries = append(entries, th.Name)
+ }
+}
+
+type FileServer struct {
+ *httptest.Server
+}
+
+func fileServer(files map[string]string) (*FileServer, error) {
+ var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
+ if filePath, found := files[r.URL.Path]; found {
+ http.ServeFile(w, r, filePath)
+ } else {
+ http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+ }
+ }
+
+ for _, file := range files {
+ if _, err := os.Stat(file); err != nil {
+ return nil, err
+ }
+ }
+ server := httptest.NewServer(handler)
+ return &FileServer{
+ Server: server,
+ }, nil
+}
+
+func copyWithCP(source, target string) error {
+ copyCmd := exec.Command("cp", "-rp", source, target)
+ out, exitCode, err := runCommandWithOutput(copyCmd)
+ if err != nil || exitCode != 0 {
+ return fmt.Errorf("failed to copy: error: %q, output: %q", err, out)
+ }
+ return nil
+}
+
+// makeRandomString returns a random string of n letters.
+func makeRandomString(n int) string {
+ letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letters[rand.Intn(len(letters))]
+ }
+ return string(b)
+}
diff --git a/integration/MAINTAINERS b/integration/MAINTAINERS
new file mode 100644
index 00000000..ad2d2d2b
--- /dev/null
+++ b/integration/MAINTAINERS
@@ -0,0 +1,2 @@
+Tibor Vass (@tiborvass)
+Cristian Staretu (@unclejack)
diff --git a/integration/README.md b/integration/README.md
new file mode 100644
index 00000000..41f43a4b
--- /dev/null
+++ b/integration/README.md
@@ -0,0 +1,23 @@
+## Legacy integration tests
+
+`./integration` contains Docker's legacy integration tests.
+It is DEPRECATED and will eventually be removed.
+
+### If you are a *CONTRIBUTOR* and want to add a test:
+
+* Consider mocking out side effects and contributing a *unit test* in the
+subsystem you're modifying.
+For example, the remote API has unit tests in `./api/server/server_unit_test.go`.
+The events subsystem has unit tests in `./events/events_test.go`. And so on.
+
+* For end-to-end integration tests, please contribute to `./integration-cli`.
+
+
+### If you are a *MAINTAINER*
+
+Please don't allow patches adding new tests to `./integration`.
+
+### If you are *LOOKING FOR A WAY TO HELP*
+
+Please consider porting tests away from `./integration` and into either unit tests or CLI tests.
+
+Any help will be greatly appreciated!
diff --git a/integration/api_test.go b/integration/api_test.go
new file mode 100644
index 00000000..8fa295e7
--- /dev/null
+++ b/integration/api_test.go
@@ -0,0 +1,1180 @@
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/api/server"
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func TestGetContainersJSON(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ job := eng.Job("containers")
+ job.SetenvBool("all", true)
+ outs, err := job.Stdout.AddTable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+ beginLen := len(outs.Data)
+
+ containerID := createTestContainer(eng, &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"echo", "test"},
+ }, t)
+
+ if containerID == "" {
+ t.Fatalf("Received empty container ID")
+ }
+
+ req, err := http.NewRequest("GET", "/containers/json?all=1", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ containers := engine.NewTable("", 0)
+ if _, err := containers.ReadListFrom(r.Body.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ if len(containers.Data) != beginLen+1 {
+ t.Fatalf("Expected %d containers, %d found (started with: %d)", beginLen+1, len(containers.Data), beginLen)
+ }
+ if id := containers.Data[0].Get("Id"); id != containerID {
+ t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", containerID, id)
+ }
+}
+
+func TestGetContainersExport(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ // Create a container that creates a file
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"touch", "/test"},
+ },
+ t,
+ )
+ containerRun(eng, containerID, t)
+
+ r := httptest.NewRecorder()
+
+ req, err := http.NewRequest("GET", "/containers/"+containerID+"/export", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+
+ found := false
+ for tarReader := tar.NewReader(r.Body); ; {
+ h, err := tarReader.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Fatal(err)
+ }
+ if h.Name == "test" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("The created test file has not been found in the exported image")
+ }
+}
+
+func TestSaveImageAndThenLoad(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ // save image
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+ tarball := r.Body
+
+ // delete the image
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+
+ // make sure there is no image
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusNotFound {
+ t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code)
+ }
+
+ // load the image
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("POST", "/images/load", tarball)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+
+ // finally make sure the image is there
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+}
+
+func TestGetContainersChanges(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ // Create a container and remove a file
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/rm", "/etc/passwd"},
+ },
+ t,
+ )
+ containerRun(eng, containerID, t)
+
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/containers/"+containerID+"/changes", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ outs := engine.NewTable("", 0)
+ if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check the changelog
+ success := false
+ for _, elem := range outs.Data {
+ if elem.Get("Path") == "/etc/passwd" && elem.GetInt("Kind") == 2 {
+ success = true
+ }
+ }
+ if !success {
+ t.Fatalf("/etc/passwd has been removed but is not present in the diff")
+ }
+}
+
+func TestGetContainersTop(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/sh", "-c", "cat"},
+ OpenStdin: true,
+ },
+ t,
+ )
+ defer func() {
+ // Make sure the process dies before destroying daemon
+ containerKill(eng, containerID, t)
+ containerWait(eng, containerID, t)
+ }()
+
+ startContainer(eng, containerID, t)
+
+ setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
+ for {
+ if containerRunning(eng, containerID, t) {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ })
+
+ if !containerRunning(eng, containerID, t) {
+ t.Fatalf("Container should be running")
+ }
+
+ // Make sure sh spawned cat
+ setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
+ in, out := containerAttach(eng, containerID, t)
+ if err := assertPipe("hello\n", "hello", out, in, 150); err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/containers/"+containerID+"/top?ps_args=aux", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ var procs engine.Env
+ if err := procs.Decode(r.Body); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(procs.GetList("Titles")) != 11 {
+ t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles")))
+ }
+ if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" {
+ t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10])
+ }
+ processes := [][]string{}
+ if err := procs.GetJson("Processes", &processes); err != nil {
+ t.Fatal(err)
+ }
+ if len(processes) != 2 {
+ t.Fatalf("Expected 2 processes, found %d.", len(processes))
+ }
+ if processes[0][10] != "/bin/sh -c cat" {
+ t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10])
+ }
+ if processes[1][10] != "/bin/sh -c cat" {
+ t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10])
+ }
+}
+
+func TestPostCommit(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ // Create a container that creates a file
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"touch", "/test"},
+ },
+ t,
+ )
+
+ containerRun(eng, containerID, t)
+
+ req, err := http.NewRequest("POST", "/commit?repo=testrepo&tag=testtag&container="+containerID, bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusCreated {
+ t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
+ }
+
+ var env engine.Env
+ if err := env.Decode(r.Body); err != nil {
+ t.Fatal(err)
+ }
+ if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil {
+ t.Fatalf("The image has not been committed")
+ }
+}
+
+func TestPostContainersCreate(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ configJSON, err := json.Marshal(&runconfig.Config{
+ Image: unitTestImageID,
+ Memory: 33554432,
+ Cmd: []string{"touch", "/test"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusCreated {
+ t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
+ }
+
+ var apiRun engine.Env
+ if err := apiRun.Decode(r.Body); err != nil {
+ t.Fatal(err)
+ }
+ containerID := apiRun.Get("Id")
+
+ containerAssertExists(eng, containerID, t)
+ containerRun(eng, containerID, t)
+
+ if !containerFileExists(eng, containerID, "test", t) {
+ t.Fatal("Test file was not created")
+ }
+}
+
+func TestPostJsonVerify(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ configJSON, err := json.Marshal(&runconfig.Config{
+ Image: unitTestImageID,
+ Memory: 33554432,
+ Cmd: []string{"touch", "/test"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r := httptest.NewRecorder()
+
+ // Don't add Content-Type header
+ // req.Header.Set("Content-Type", "application/json")
+
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") {
+ t.Fatal("Create should have failed due to no Content-Type header - got:", r)
+ }
+
+ // Now add header but with wrong type and retest
+ req.Header.Set("Content-Type", "application/xml")
+
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") {
+ t.Fatal("Create should have failed due to wrong Content-Type header - got:", r)
+ }
+}
+
+// Issue 7941 - test to make sure a "null" in JSON is just ignored.
+// Without this fix, a null in JSON would be parsed into a string var as "null"
+func TestPostCreateNull(t *testing.T) {
+ eng := NewTestEngine(t)
+ daemon := mkDaemonFromEngine(eng, t)
+ defer daemon.Nuke()
+
+ configStr := fmt.Sprintf(`{
+ "Hostname":"",
+ "Domainname":"",
+ "Memory":0,
+ "MemorySwap":0,
+ "CpuShares":0,
+ "Cpuset":null,
+ "AttachStdin":true,
+ "AttachStdout":true,
+ "AttachStderr":true,
+ "PortSpecs":null,
+ "ExposedPorts":{},
+ "Tty":true,
+ "OpenStdin":true,
+ "StdinOnce":true,
+ "Env":[],
+ "Cmd":"ls",
+ "Image":"%s",
+ "Volumes":{},
+ "WorkingDir":"",
+ "Entrypoint":null,
+ "NetworkDisabled":false,
+ "OnBuild":null}`, unitTestImageID)
+
+ req, err := http.NewRequest("POST", "/containers/create", strings.NewReader(configStr))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusCreated {
+ t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
+ }
+
+ var apiRun engine.Env
+ if err := apiRun.Decode(r.Body); err != nil {
+ t.Fatal(err)
+ }
+ containerID := apiRun.Get("Id")
+
+ containerAssertExists(eng, containerID, t)
+
+ c := daemon.Get(containerID)
+ if c.Config.Cpuset != "" {
+ t.Fatalf("Cpuset should have been empty - instead it's: %s", c.Config.Cpuset)
+ }
+}
+
+func TestPostContainersKill(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/cat"},
+ OpenStdin: true,
+ },
+ t,
+ )
+
+ startContainer(eng, containerID, t)
+
+ // Give the process some time to start
+ containerWaitTimeout(eng, containerID, t)
+
+ if !containerRunning(eng, containerID, t) {
+ t.Errorf("Container should be running")
+ }
+
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("POST", "/containers/"+containerID+"/kill", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNoContent {
+ t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
+ }
+ if containerRunning(eng, containerID, t) {
+ t.Fatalf("The container hasn't been killed")
+ }
+}
+
+func TestPostContainersRestart(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/top"},
+ OpenStdin: true,
+ },
+ t,
+ )
+
+ startContainer(eng, containerID, t)
+
+ // Give the process some time to start
+ containerWaitTimeout(eng, containerID, t)
+
+ if !containerRunning(eng, containerID, t) {
+ t.Errorf("Container should be running")
+ }
+
+ req, err := http.NewRequest("POST", "/containers/"+containerID+"/restart?t=1", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNoContent {
+ t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
+ }
+
+ // Give the process some time to restart
+ containerWaitTimeout(eng, containerID, t)
+
+ if !containerRunning(eng, containerID, t) {
+ t.Fatalf("Container should be running")
+ }
+
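+ // kill the restarted container so the deferred Nuke() doesn't have to time out waiting for it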
+ containerKill(eng, containerID, t)
+}
+
+func TestPostContainersStart(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(
+ eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/cat"},
+ OpenStdin: true,
+ },
+ t,
+ )
+
+ hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNoContent {
+ t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
+ }
+
+ containerAssertExists(eng, containerID, t)
+
+ req, err = http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+
+ r = httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+
+ // Starting an already started container should return a 304
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNotModified {
+ t.Fatalf("%d NOT MODIFIED expected, received %d\n", http.StatusNotModified, r.Code)
+ }
+ containerAssertExists(eng, containerID, t)
+ containerKill(eng, containerID, t)
+}
+
+func TestPostContainersStop(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/top"},
+ OpenStdin: true,
+ },
+ t,
+ )
+
+ startContainer(eng, containerID, t)
+
+ // Give the process some time to start
+ containerWaitTimeout(eng, containerID, t)
+
+ if !containerRunning(eng, containerID, t) {
+ t.Errorf("Container should be running")
+ }
+
+ // Note: as it is a POST request, it requires a body.
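+ // (an empty reader suffices; the stop timeout comes from the ?t=1 query parameter)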
+ req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ r := httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNoContent {
+ t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
+ }
+ if containerRunning(eng, containerID, t) {
+ t.Fatalf("The container hasn't been stopped")
+ }
+
+ req, err = http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r = httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+
+ // Stopping an already stopped container should return a 304
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNotModified {
+ t.Fatalf("%d NOT MODIFIED expected, received %d\n", http.StatusNotModified, r.Code)
+ }
+}
+
+func TestPostContainersWait(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/sleep", "1"},
+ OpenStdin: true,
+ },
+ t,
+ )
+ startContainer(eng, containerID, t)
+
+ setTimeout(t, "Wait timed out", 3*time.Second, func() {
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r, t)
+ var apiWait engine.Env
+ if err := apiWait.Decode(r.Body); err != nil {
+ t.Fatal(err)
+ }
+ if apiWait.GetInt("StatusCode") != 0 {
+ t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode"))
+ }
+ })
+
+ if containerRunning(eng, containerID, t) {
+ t.Fatalf("The container should be stopped after wait")
+ }
+}
+
+func TestPostContainersAttach(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/cat"},
+ OpenStdin: true,
+ },
+ t,
+ )
+ // Start the process
+ startContainer(eng, containerID, t)
+
+ stdin, stdinPipe := io.Pipe()
+ stdout, stdoutPipe := io.Pipe()
+
+ // Try to avoid the timeout in destroy. Best effort, don't check error
+ defer func() {
+ closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
+ containerKill(eng, containerID, t)
+ }()
+
+ // Attach to it
+ c1 := make(chan struct{})
+ go func() {
+ defer close(c1)
+
+ r := &hijackTester{
+ ResponseRecorder: httptest.NewRecorder(),
+ in: stdin,
+ out: stdoutPipe,
+ }
+
+ req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ assertHttpNotError(r.ResponseRecorder, t)
+ }()
+
+ // Acknowledge hijack
+ setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
+ stdout.Read([]byte{})
+ stdout.Read(make([]byte, 4096))
+ })
+
+ setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
+ if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ // Close pipes (client disconnects)
+ if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
+ t.Fatal(err)
+ }
+
+ // Wait for attach to finish; the client disconnected, therefore Attach finished its job
+ setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() {
+ <-c1
+ })
+
+ // We closed stdin, expect /bin/cat to still be running
+ // Wait a little bit to make sure container.monitor() did its thing
+ containerWaitTimeout(eng, containerID, t)
+
+ // Try to avoid the timeout in destroy. Best effort, don't check error
+ cStdin, _ := containerAttach(eng, containerID, t)
+ cStdin.Close()
+ containerWait(eng, containerID, t)
+}
+
+func TestPostContainersAttachStderr(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkDaemonFromEngine(eng, t).Nuke()
+
+ containerID := createTestContainer(eng,
+ &runconfig.Config{
+ Image: unitTestImageID,
+ Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"},
+ OpenStdin: true,
+ },
+ t,
+ )
+ // Start the process
+ startContainer(eng, containerID, t)
+
+ stdin, stdinPipe := io.Pipe()
+ stdout, stdoutPipe := io.Pipe()
+
+ // Try to avoid the timeout in destroy. Best effort, don't check error
Best effort, don't check error + defer func() { + closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + containerKill(eng, containerID, t) + }() + + // Attach to it + c1 := make(chan struct{}) + go func() { + defer close(c1) + + r := &hijackTester{ + ResponseRecorder: httptest.NewRecorder(), + in: stdin, + out: stdoutPipe, + } + + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + if err != nil { + t.Fatal(err) + } + + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r.ResponseRecorder, t) + }() + + // Acknowledge hijack + setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() { + stdout.Read([]byte{}) + stdout.Read(make([]byte, 4096)) + }) + + setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil { + t.Fatal(err) + } + }) + + // Close pipes (client disconnects) + if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { + t.Fatal(err) + } + + // Wait for attach to finish; the client disconnected, so Attach has finished its job + setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() { + <-c1 + }) + + // We closed stdin, expect /bin/cat to still be running + // Wait a little bit to make sure container.monitor() did its thing + containerWaitTimeout(eng, containerID, t) + + // Try to avoid the timeout in destroy. Best effort, don't check error + cStdin, _ := containerAttach(eng, containerID, t) + cStdin.Close() + containerWait(eng, containerID, t) +} + +func TestOptionsRoute(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + req, err := http.NewRequest("OPTIONS", "/", nil) + if err != nil { + t.Fatal(err) + } + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + if r.Code != http.StatusOK { + t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) + } +} + +func TestGetEnabledCors(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "/version", nil) + if err != nil { + t.Fatal(err) + } + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + if r.Code != http.StatusOK { + t.Errorf("Expected response for GET request to be \"200\", %v found.", r.Code) + } + + allowOrigin := r.Header().Get("Access-Control-Allow-Origin") + allowHeaders := r.Header().Get("Access-Control-Allow-Headers") + allowMethods := r.Header().Get("Access-Control-Allow-Methods") + + if allowOrigin != "*" { + t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin) + } + if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept" { + t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept\", %s found.", allowHeaders) + } + if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" { + t.Errorf("Expected header Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) + } +} + +func TestDeleteImages(t *testing.T) { + eng := NewTestEngine(t) + // we expect errors, so we disable stderr + eng.Stderr = ioutil.Discard + defer mkDaemonFromEngine(eng, 
t).Nuke() + + initialImages := getImages(eng, t, true, "") + + if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil { + t.Fatal(err) + } + + images := getImages(eng, t, true, "") + + if len(images.Data[0].GetList("RepoTags")) != len(initialImages.Data[0].GetList("RepoTags"))+1 { + t.Errorf("Expected %d images, %d found", len(initialImages.Data[0].GetList("RepoTags"))+1, len(images.Data[0].GetList("RepoTags"))) + } + + req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) + if err != nil { + t.Fatal(err) + } + + r := httptest.NewRecorder() + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusConflict { + t.Fatalf("Expected http status 409-conflict, got %v", r.Code) + } + + req2, err := http.NewRequest("DELETE", "/images/test:test", nil) + if err != nil { + t.Fatal(err) + } + + r2 := httptest.NewRecorder() + if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { + t.Fatal(err) + } + assertHttpNotError(r2, t) + if r2.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r2.Code) + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(r2.Body.Bytes()); err != nil { + t.Fatal(err) + } + if len(outs.Data) != 1 { + t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs.Data)) + } + images = getImages(eng, t, false, "") + + if images.Len() != initialImages.Len() { + t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) + } +} + +func TestPostContainersCopy(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + // Create a container that creates a file we can copy out + containerID := createTestContainer(eng, + &runconfig.Config{ + Image: unitTestImageID, + Cmd: []string{"touch", "/test.txt"}, + }, + t, + ) + containerRun(eng, containerID, t) + + r := httptest.NewRecorder() + + var copyData engine.Env + copyData.Set("Resource", "/test.txt") + copyData.Set("HostPath", ".") + + jsonData := bytes.NewBuffer(nil) + if err := copyData.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Content-Type", "application/json") + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + + found := false + for tarReader := tar.NewReader(r.Body); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + if !found { + t.Fatalf("The created test file was not found in the copied output") + } +} + +func TestPostContainersCopyWhenContainerNotFound(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + + var copyData engine.Env + copyData.Set("Resource", "/test.txt") + copyData.Set("HostPath", ".") + + jsonData := bytes.NewBuffer(nil) + if err := copyData.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Content-Type", "application/json") + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusNotFound { + t.Fatalf("404 expected for 
id_not_found container, received %v", r.Code) + } +} + +// Regression test for https://github.com/docker/docker/issues/6231 +func TestContainersStartChunkedEncodingHostConfig(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + + var testData engine.Env + testData.Set("Image", "docker-test-image") + testData.SetAuto("Volumes", map[string]struct{}{"/foo": {}}) + testData.Set("Cmd", "true") + jsonData := bytes.NewBuffer(nil) + if err := testData.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err := http.NewRequest("POST", "/containers/create?name=chunk_test", jsonData) + if err != nil { + t.Fatal(err) + } + + req.Header.Add("Content-Type", "application/json") + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + var testData2 engine.Env + testData2.SetAuto("Binds", []string{"/tmp:/foo"}) + jsonData = bytes.NewBuffer(nil) + if err := testData2.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err = http.NewRequest("POST", "/containers/chunk_test/start", jsonData) + if err != nil { + t.Fatal(err) + } + + req.Header.Add("Content-Type", "application/json") + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Transfer-Encoding header to chunked) net/http will overwrite it + // http://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + type config struct { + HostConfig struct { + Binds []string + } + } + + req, err = http.NewRequest("GET", "/containers/chunk_test/json", nil) + if err != nil { + t.Fatal(err) + } + + r2 := httptest.NewRecorder() + req.Header.Add("Content-Type", "application/json") + if err := server.ServeRequest(eng, api.APIVERSION, r2, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r2, t) + + c := config{} + + if err := json.Unmarshal(r2.Body.Bytes(), &c); err != nil { + t.Fatal(err) + } + + if len(c.HostConfig.Binds) == 0 { + t.Fatal("Chunked Encoding not handled") + } + + if c.HostConfig.Binds[0] != "/tmp:/foo" { + t.Fatal("Chunked encoding not properly handled, expected binds to be /tmp:/foo, got:", c.HostConfig.Binds[0]) + } +} + +// Mocked types for tests +type NopConn struct { + io.ReadCloser + io.Writer +} + +func (c *NopConn) LocalAddr() net.Addr { return nil } +func (c *NopConn) RemoteAddr() net.Addr { return nil } +func (c *NopConn) SetDeadline(t time.Time) error { return nil } +func (c *NopConn) SetReadDeadline(t time.Time) error { return nil } +func (c *NopConn) SetWriteDeadline(t time.Time) error { return nil } + +type hijackTester struct { + *httptest.ResponseRecorder + in io.ReadCloser + out io.Writer +} + +func (t *hijackTester) Hijack() (net.Conn, *bufio.ReadWriter, error) { + bufrw := bufio.NewReadWriter(bufio.NewReader(t.in), bufio.NewWriter(t.out)) + conn := &NopConn{ + ReadCloser: t.in, + Writer: t.out, + } + return conn, bufrw, nil +} diff --git a/integration/commands_test.go b/integration/commands_test.go new file mode 100644 index 00000000..532e6f79 --- /dev/null +++ b/integration/commands_test.go @@ -0,0 +1,564 @@ +package docker + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/client" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" + "github.com/docker/libtrust" +) + +func 
closeWrap(args ...io.Closer) error { + e := false + ret := fmt.Errorf("Error closing elements") + for _, c := range args { + if err := c.Close(); err != nil { + e = true + ret = fmt.Errorf("%s\n%s", ret, err) + } + } + if e { + return ret + } + return nil +} + +func setRaw(t *testing.T, c *daemon.Container) *term.State { + pty, err := c.GetPtyMaster() + if err != nil { + t.Fatal(err) + } + state, err := term.MakeRaw(pty.Fd()) + if err != nil { + t.Fatal(err) + } + return state +} + +func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) { + pty, err := c.GetPtyMaster() + if err != nil { + t.Fatal(err) + } + term.RestoreTerminal(pty.Fd(), state) +} + +func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container { + var container *daemon.Container + + setTimeout(t, "Waiting for the container to be started timed out", timeout, func() { + for { + l := globalDaemon.List() + if len(l) == 1 && l[0].IsRunning() { + container = l[0] + break + } + time.Sleep(10 * time.Millisecond) + } + }) + + if container == nil { + t.Fatal("An error occurred while waiting for the container to start") + } + + return container +} + +func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { + c := make(chan bool) + + // Make sure we don't wait too long + go func() { + time.Sleep(d) + c <- true + }() + go func() { + f() + c <- false + }() + if <-c && msg != "" { + t.Fatal(msg) + } +} + +func expectPipe(expected string, r io.Reader) error { + o, err := bufio.NewReader(r).ReadString('\n') + if err != nil { + return err + } + if strings.Trim(o, " \r\n") != expected { + return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o) + } + return nil +} + +func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error { + for i := 0; i < count; i++ { + if _, err := w.Write([]byte(input)); err != nil { + return err + } + if err := expectPipe(output, r); err != nil { + return err + } + } + return nil +} + +// Expected behaviour: the process dies when the client disconnects +func TestRunDisconnect(t *testing.T) { + + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + c1 := make(chan struct{}) + go func() { + // We're simulating a disconnect so the return value doesn't matter. What matters is the + // fact that CmdRun returns. + cli.CmdRun("-i", unitTestImageID, "/bin/cat") + close(c1) + }() + + setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + t.Fatal(err) + } + }) + + // Close pipes (simulate disconnect) + if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { + t.Fatal(err) + } + + // as the pipes are closed, we expect the process to die, + // therefore CmdRun to unblock. Wait for CmdRun + setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() { + <-c1 + }) + + // Client disconnect after run -i should cause stdin to be closed, which should + // cause /bin/cat to exit. 
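+ // (cat exits as soon as it reaches EOF on stdin, so the WaitStop below should return promptly.)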
+ setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() { + container := globalDaemon.List()[0] + container.WaitStop(-1 * time.Second) + if container.IsRunning() { + t.Fatalf("/bin/cat is still running after closing stdin") + } + }) +} + +// Expected behaviour: the process stay alive when the client disconnects +// but the client detaches. +func TestRunDisconnectTty(t *testing.T) { + + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + c1 := make(chan struct{}) + go func() { + defer close(c1) + // We're simulating a disconnect so the return value doesn't matter. What matters is the + // fact that CmdRun returns. + if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil { + log.Debugf("Error CmdRun: %s", err) + } + }() + + container := waitContainerStart(t, 10*time.Second) + + state := setRaw(t, container) + defer unsetRaw(t, container, state) + + // Client disconnect after run -i should keep stdin out in TTY mode + setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + t.Fatal(err) + } + }) + + // Close pipes (simulate disconnect) + if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { + t.Fatal(err) + } + + // wait for CmdRun to return + setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() { + <-c1 + }) + + // In tty mode, we expect the process to stay alive even after client's stdin closes. + + // Give some time to monitor to do his thing + container.WaitStop(500 * time.Millisecond) + if !container.IsRunning() { + t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)") + } +} + +// TestRunDetach checks attaching and detaching with the escape sequence. 
+func TestRunDetach(t *testing.T) { + + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + ch := make(chan struct{}) + go func() { + defer close(ch) + cli.CmdRun("-i", "-t", unitTestImageID, "cat") + }() + + container := waitContainerStart(t, 10*time.Second) + + state := setRaw(t, container) + defer unsetRaw(t, container, state) + + setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + t.Fatal(err) + } + }) + + setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { + stdinPipe.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + stdinPipe.Write([]byte{17}) + }) + + // wait for CmdRun to return + setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() { + <-ch + }) + closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + + time.Sleep(500 * time.Millisecond) + if !container.IsRunning() { + t.Fatal("The detached container should still be running") + } + + setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() { + container.Kill() + }) +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func TestAttachDetach(t *testing.T) { + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + ch := make(chan struct{}) + go func() { + defer close(ch) + if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { + t.Fatal(err) + } + }() + + container := waitContainerStart(t, 10*time.Second) + + setTimeout(t, "Reading container's id timed out", 10*time.Second, func() { + buf := make([]byte, 1024) + n, err := stdout.Read(buf) + if err != nil { + t.Fatal(err) + } + + if strings.Trim(string(buf[:n]), " \r\n") != container.ID { + t.Fatalf("Wrong ID received. 
Expect %s, received %s", container.ID, buf[:n]) + } + }) + setTimeout(t, "Starting container timed out", 10*time.Second, func() { + <-ch + }) + + state := setRaw(t, container) + defer unsetRaw(t, container, state) + + stdin, stdinPipe = io.Pipe() + stdout, stdoutPipe = io.Pipe() + cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + + ch = make(chan struct{}) + go func() { + defer close(ch) + if err := cli.CmdAttach(container.ID); err != nil { + if err != io.ErrClosedPipe { + t.Fatal(err) + } + } + }() + + setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err != io.ErrClosedPipe { + t.Fatal(err) + } + } + }) + + setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { + stdinPipe.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + stdinPipe.Write([]byte{17}) + }) + + // wait for CmdAttach to return + setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { + <-ch + }) + + closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + + time.Sleep(500 * time.Millisecond) + if !container.IsRunning() { + t.Fatal("The detached container should still be running") + } + + setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() { + container.Kill() + }) +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func TestAttachDetachTruncatedID(t *testing.T) { + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + // Discard the CmdRun output + go stdout.Read(make([]byte, 1024)) + setTimeout(t, "Starting container timed out", 2*time.Second, func() { + if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { + t.Fatal(err) + } + }) + + container := waitContainerStart(t, 10*time.Second) + + state := setRaw(t, container) + defer unsetRaw(t, container, state) + + stdin, stdinPipe = io.Pipe() + stdout, stdoutPipe = io.Pipe() + cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + + ch := make(chan struct{}) + go func() { + defer close(ch) + if err := cli.CmdAttach(utils.TruncateID(container.ID)); err != nil { + if err != io.ErrClosedPipe { + t.Fatal(err) + } + } + }() + + setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err != io.ErrClosedPipe { + t.Fatal(err) + } + } + }) + + setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { + stdinPipe.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + stdinPipe.Write([]byte{17}) + }) + + // wait for CmdAttach to return + setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { + <-ch + }) + closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + + time.Sleep(500 * time.Millisecond) + if !container.IsRunning() { + t.Fatal("The detached container should still be running") + } + + setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() { + container.Kill() + }) +} + +// Expected behaviour: the process stays alive when the client disconnects +func TestAttachDisconnect(t *testing.T) { + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + key, err := 
libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + go func() { + // Start a process in daemon mode + if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil { + log.Debugf("Error CmdRun: %s", err) + } + }() + + setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() { + if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { + t.Fatal(err) + } + }) + + setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { + for { + l := globalDaemon.List() + if len(l) == 1 && l[0].IsRunning() { + break + } + time.Sleep(10 * time.Millisecond) + } + }) + + container := globalDaemon.List()[0] + + // Attach to it + c1 := make(chan struct{}) + go func() { + // We're simulating a disconnect so the return value doesn't matter. What matters is the + // fact that CmdAttach returns. + cli.CmdAttach(container.ID) + close(c1) + }() + + setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { + if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + t.Fatal(err) + } + }) + // Close pipes (client disconnects) + if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { + t.Fatal(err) + } + + // Wait for attach to finish; the client disconnected, so Attach has finished its job + setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() { + <-c1 + }) + + // We closed stdin, expect /bin/cat to still be running + // Wait a little bit to make sure container.monitor() did its thing + _, err = container.WaitStop(500 * time.Millisecond) + if err == nil || !container.IsRunning() { + t.Fatalf("/bin/cat is not running after closing stdin") + } + + // Try to avoid the timeout in destroy. Best effort, don't check error + cStdin, _ := container.StdinPipe() + cStdin.Close() + container.WaitStop(-1 * time.Second) +} + +// Expected behaviour: container gets deleted automatically after exit +func TestRunAutoRemove(t *testing.T) { + t.Skip("Fixme. 
Skipping test for now, race condition") + stdout, stdoutPipe := io.Pipe() + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + c := make(chan struct{}) + go func() { + defer close(c) + if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil { + t.Fatal(err) + } + }() + + var temporaryContainerID string + setTimeout(t, "Reading command output time out", 2*time.Second, func() { + cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + t.Fatal(err) + } + temporaryContainerID = cmdOutput + if err := closeWrap(stdout, stdoutPipe); err != nil { + t.Fatal(err) + } + }) + + setTimeout(t, "CmdRun timed out", 10*time.Second, func() { + <-c + }) + + time.Sleep(500 * time.Millisecond) + + if len(globalDaemon.List()) > 0 { + t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID) + } +} + +// Expected behaviour: error out when attempting to bind mount non-existing source paths +func TestRunErrorBindNonExistingSource(t *testing.T) { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cli := client.NewDockerCli(nil, nil, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + c := make(chan struct{}) + go func() { + defer close(c) + // This check is made at runtime, can't be "unit tested" + if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil { + t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount") + } + }() + + setTimeout(t, "CmdRun timed out", 5*time.Second, func() { + <-c + }) +} diff --git a/integration/container_test.go b/integration/container_test.go new file mode 100644 index 00000000..ab94cbc6 --- /dev/null +++ b/integration/container_test.go @@ -0,0 +1,259 @@ +package docker + +import ( + "io" + "io/ioutil" + "testing" + "time" + + "github.com/docker/docker/runconfig" +) + +func TestRestartStdin(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"cat"}, + + OpenStdin: true, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + t.Fatal(err) + } + defer daemon.Destroy(container) + + stdin, err := container.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := container.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := container.Start(); err != nil { + t.Fatal(err) + } + if _, err := io.WriteString(stdin, "hello world"); err != nil { + t.Fatal(err) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + container.WaitStop(-1 * time.Second) + output, err := ioutil.ReadAll(stdout) + if err != nil { + t.Fatal(err) + } + if err := stdout.Close(); err != nil { + t.Fatal(err) + } + if string(output) != "hello world" { + t.Fatalf("Unexpected output. 
Expected %s, received: %s", "hello world", string(output)) + } + + // Restart and try again + stdin, err = container.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err = container.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := container.Start(); err != nil { + t.Fatal(err) + } + if _, err := io.WriteString(stdin, "hello world #2"); err != nil { + t.Fatal(err) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + container.WaitStop(-1 * time.Second) + output, err = ioutil.ReadAll(stdout) + if err != nil { + t.Fatal(err) + } + if err := stdout.Close(); err != nil { + t.Fatal(err) + } + if string(output) != "hello world #2" { + t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output)) + } +} + +func TestStdin(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"cat"}, + + OpenStdin: true, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + t.Fatal(err) + } + defer daemon.Destroy(container) + + stdin, err := container.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := container.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := container.Start(); err != nil { + t.Fatal(err) + } + defer stdin.Close() + defer stdout.Close() + if _, err := io.WriteString(stdin, "hello world"); err != nil { + t.Fatal(err) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + container.WaitStop(-1 * time.Second) + output, err := ioutil.ReadAll(stdout) + if err != nil { + t.Fatal(err) + } + if string(output) != "hello world" { + t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) + } +} + +func TestTty(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"cat"}, + + OpenStdin: true, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + t.Fatal(err) + } + defer daemon.Destroy(container) + + stdin, err := container.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := container.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := container.Start(); err != nil { + t.Fatal(err) + } + defer stdin.Close() + defer stdout.Close() + if _, err := io.WriteString(stdin, "hello world"); err != nil { + t.Fatal(err) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + container.WaitStop(-1 * time.Second) + output, err := ioutil.ReadAll(stdout) + if err != nil { + t.Fatal(err) + } + if string(output) != "hello world" { + t.Fatalf("Unexpected output. 
Expected %s, received: %s", "hello world", string(output)) + } +} + +func BenchmarkRunSequential(b *testing.B) { + daemon := mkDaemon(b) + defer nuke(daemon) + for i := 0; i < b.N; i++ { + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"echo", "-n", "foo"}, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + b.Fatal(err) + } + defer daemon.Destroy(container) + output, err := container.Output() + if err != nil { + b.Fatal(err) + } + if string(output) != "foo" { + b.Fatalf("Unexpected output: %s", output) + } + if err := daemon.Destroy(container); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRunParallel(b *testing.B) { + daemon := mkDaemon(b) + defer nuke(daemon) + + var tasks []chan error + + for i := 0; i < b.N; i++ { + complete := make(chan error) + tasks = append(tasks, complete) + go func(i int, complete chan error) { + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"echo", "-n", "foo"}, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + complete <- err + return + } + defer daemon.Destroy(container) + if err := container.Start(); err != nil { + complete <- err + return + } + if _, err := container.WaitStop(15 * time.Second); err != nil { + complete <- err + return + } + // if string(output) != "foo" { + // complete <- fmt.Errorf("Unexpected output: %v", string(output)) + // } + if err := daemon.Destroy(container); err != nil { + complete <- err + return + } + complete <- nil + }(i, complete) + } + var errors []error + for _, task := range tasks { + err := <-task + if err != nil { + errors = append(errors, err) + } + } + if len(errors) > 0 { + b.Fatal(errors) + } +} diff --git a/integration/fixtures/https/ca.pem b/integration/fixtures/https/ca.pem new file mode 100644 index 00000000..6825d6d1 --- /dev/null +++ b/integration/fixtures/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD +VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/client-cert.pem b/integration/fixtures/https/client-cert.pem new file mode 100644 index 00000000..c05ed47c --- /dev/null +++ 
b/integration/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END 
CERTIFICATE----- diff --git a/integration/fixtures/https/client-key.pem b/integration/fixtures/https/client-key.pem new file mode 100644 index 00000000..b5c15f8d --- /dev/null +++ b/integration/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/integration/fixtures/https/client-rogue-cert.pem b/integration/fixtures/https/client-rogue-cert.pem new file mode 100644 index 00000000..21ae4bd5 --- /dev/null +++ b/integration/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx 
+CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/client-rogue-key.pem b/integration/fixtures/https/client-rogue-key.pem new file mode 100644 index 00000000..53c122ab --- /dev/null +++ b/integration/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/integration/fixtures/https/server-cert.pem b/integration/fixtures/https/server-cert.pem new file mode 100644 index 00000000..08abfd1a --- /dev/null +++ b/integration/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + 
e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/server-key.pem b/integration/fixtures/https/server-key.pem new file mode 100644 index 00000000..c269320e --- /dev/null +++ b/integration/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS 
+IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/integration/fixtures/https/server-rogue-cert.pem b/integration/fixtures/https/server-rogue-cert.pem new file mode 100644 index 00000000..28feba66 --- /dev/null +++ b/integration/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc 
+qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/server-rogue-key.pem b/integration/fixtures/https/server-rogue-key.pem new file mode 100644 index 00000000..10f7c650 --- /dev/null +++ b/integration/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/integration/graph_test.go b/integration/graph_test.go new file mode 100644 index 00000000..203476cb --- /dev/null +++ b/integration/graph_test.go @@ -0,0 +1,318 @@ +package docker + +import ( + "errors" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/utils" + "io" + "io/ioutil" + "os" + "path" + "testing" + "time" +) + +func TestMount(t *testing.T) { + graph, driver := tempGraph(t) + defer os.RemoveAll(graph.Root) + defer driver.Cleanup() + + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + image, err := graph.Create(archive, "", "", "Testing", "", nil, nil) + if err != nil { + t.Fatal(err) + } + tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + rootfs := path.Join(tmp, "rootfs") + if err := os.MkdirAll(rootfs, 0700); err != nil { + t.Fatal(err) + } + rw := path.Join(tmp, "rw") + if err := os.MkdirAll(rw, 0700); err != nil { + t.Fatal(err) + } + + if _, err := driver.Get(image.ID, ""); err != nil { + t.Fatal(err) + } +} + +func TestInit(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + // Root should exist + if _, err := os.Stat(graph.Root); err != nil { + t.Fatal(err) + } + // Map() should be empty + if l, err := graph.Map(); err != nil { + t.Fatal(err) + } else if len(l) != 0 { + t.Fatalf("len(Map()) should 
return %d, not %d", 0, len(l)) + } +} + +// Test that Register can be interrupted cleanly without side effects +func TestInterruptedRegister(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data + image := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: "testing", + Created: time.Now(), + } + w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) + graph.Register(image, nil, badArchive) + if _, err := graph.Get(image.ID); err == nil { + t.Fatal("Image should not exist after Register is interrupted") + } + // Registering the same image again should succeed if the first register was interrupted + goodArchive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + if err := graph.Register(image, nil, goodArchive); err != nil { + t.Fatal(err) + } +} + +// FIXME: Do more extensive tests (ex: create multiple, delete, recreate; +// create multiple, check the amount of images and paths, etc..) +func TestGraphCreate(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img, err := graph.Create(archive, "", "", "Testing", "", nil, nil) + if err != nil { + t.Fatal(err) + } + if err := utils.ValidateID(img.ID); err != nil { + t.Fatal(err) + } + if img.Comment != "Testing" { + t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment) + } + if img.DockerVersion != dockerversion.VERSION { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion) + } + images, err := graph.Map() + if err != nil { + t.Fatal(err) + } else if l := len(images); l != 1 { + t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) + } + if images[img.ID] == nil { + t.Fatalf("Could not find image with id %s", img.ID) + } +} + +func TestRegister(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + image := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: "testing", + Created: time.Now(), + } + err = graph.Register(image, nil, archive) + if err != nil { + t.Fatal(err) + } + if images, err := graph.Map(); err != nil { + t.Fatal(err) + } else if l := len(images); l != 1 { + t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) + } + if resultImg, err := graph.Get(image.ID); err != nil { + t.Fatal(err) + } else { + if resultImg.ID != image.ID { + t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID) + } + if resultImg.Comment != image.Comment { + t.Fatalf("Wrong image comment. 
Should be '%s', not '%s'", image.Comment, resultImg.Comment) + } + } +} + +// Test that an image can be deleted by its shorthand prefix +func TestDeletePrefix(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + img := createTestImage(graph, t) + if err := graph.Delete(utils.TruncateID(img.ID)); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 0) +} + +func createTestImage(graph *graph.Graph, t *testing.T) *image.Image { + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) + if err != nil { + t.Fatal(err) + } + return img +} + +func TestDelete(t *testing.T) { + graph, _ := tempGraph(t) + defer nukeGraph(graph) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 0) + img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil) + if err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 1) + if err := graph.Delete(img.ID); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 0) + + archive, err = fakeTar() + if err != nil { + t.Fatal(err) + } + // Test 2 create (same name) / 1 delete + img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil) + if err != nil { + t.Fatal(err) + } + archive, err = fakeTar() + if err != nil { + t.Fatal(err) + } + if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 2) + if err := graph.Delete(img1.ID); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 1) + + // Test delete wrong name + if err := graph.Delete("Not_foo"); err == nil { + t.Fatalf("Deleting wrong ID should return an error") + } + assertNImages(graph, t, 1) + + archive, err = fakeTar() + if err != nil { + t.Fatal(err) + } + // Test delete twice (pull -> rm -> pull -> rm) + if err := graph.Register(img1, nil, archive); err != nil { + t.Fatal(err) + } + if err := graph.Delete(img1.ID); err != nil { + t.Fatal(err) + } + assertNImages(graph, t, 1) +} + +func TestByParent(t *testing.T) { + archive1, _ := fakeTar() + archive2, _ := fakeTar() + archive3, _ := fakeTar() + + graph, _ := tempGraph(t) + defer nukeGraph(graph) + parentImage := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: "parent", + Created: time.Now(), + Parent: "", + } + childImage1 := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: "child1", + Created: time.Now(), + Parent: parentImage.ID, + } + childImage2 := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: "child2", + Created: time.Now(), + Parent: parentImage.ID, + } + _ = graph.Register(parentImage, nil, archive1) + _ = graph.Register(childImage1, nil, archive2) + _ = graph.Register(childImage2, nil, archive3) + + byParent, err := graph.ByParent() + if err != nil { + t.Fatal(err) + } + numChildren := len(byParent[parentImage.ID]) + if numChildren != 2 { + t.Fatalf("Expected 2 children, found %d", numChildren) + } +} + +/* + * HELPER FUNCTIONS + */ + +func assertNImages(graph *graph.Graph, t *testing.T, n int) { + if images, err := graph.Map(); err != nil { + t.Fatal(err) + } else if actualN := len(images); actualN != n { + t.Fatalf("Expected %d images, found %d", n, actualN) + } +} + +func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { + tmp, err := ioutil.TempDir("", "docker-graph-") + if err != nil { + t.Fatal(err) + } + driver, err := graphdriver.New(tmp, nil) + if err != nil { + t.Fatal(err) + } + graph, err := graph.NewGraph(tmp, driver) + if err != nil { + t.Fatal(err) + } + return graph, 
driver +} + +func nukeGraph(graph *graph.Graph) { + graph.Driver().Cleanup() + os.RemoveAll(graph.Root) +} + +func testArchive(t *testing.T) archive.Archive { + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + return archive +} diff --git a/integration/https_test.go b/integration/https_test.go new file mode 100644 index 00000000..0705dc81 --- /dev/null +++ b/integration/https_test.go @@ -0,0 +1,97 @@ +package docker + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/client" + "github.com/docker/libtrust" +) + +const ( + errBadCertificate = "remote error: bad certificate" + errCaUnknown = "x509: certificate signed by unknown authority" +) + +func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile("fixtures/https/ca.pem") + if err != nil { + t.Fatal(err) + } + certPool.AppendCertsFromPEM(file) + + cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile) + if err != nil { + t.Fatalf("Couldn't load X509 key pair: %s", err) + } + tlsConfig := &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{cert}, + } + return tlsConfig +} + +// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint +func TestHttpsInfo(t *testing.T) { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, key, testDaemonProto, + testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + if err := cli.CmdInfo(); err != nil { + t.Fatal(err) + } + }) +} + +// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. 
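+// The "rogue" client key pair in fixtures/https/ is issued by a different CA
+// than the one the daemon trusts (its issuer is the "Evil Inc" CA embedded in
+// the fixture certificates), so the server aborts the TLS handshake and the
+// client sees "remote error: bad certificate" (errBadCertificate above).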
+func TestHttpsInfoRogueCert(t *testing.T) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, key, testDaemonProto,
+		testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t))
+
+	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
+		err := cli.CmdInfo()
+		if err == nil {
+			t.Fatal("Expected error but got nil")
+		}
+		if !strings.Contains(err.Error(), errBadCertificate) {
+			t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err)
+		}
+	})
+}
+
+// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint
+// which provides a rogue server certificate and checks that it fails with the expected error.
+func TestHttpsInfoRogueServerCert(t *testing.T) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, key, testDaemonProto,
+		testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t))
+
+	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
+		err := cli.CmdInfo()
+		if err == nil {
+			t.Fatal("Expected error but got nil")
+		}
+
+		if !strings.Contains(err.Error(), errCaUnknown) {
+			t.Fatalf("Expected error: %s, got instead: %s", errCaUnknown, err)
+		}
+
+	})
+}
diff --git a/integration/runtime_test.go b/integration/runtime_test.go
new file mode 100644
index 00000000..0a9a74cf
--- /dev/null
+++ b/integration/runtime_test.go
@@ -0,0 +1,899 @@
+package docker
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	std_log "log"
+	"net"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+const (
+	unitTestImageName        = "docker-test-image"
+	unitTestImageID          = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
+	unitTestImageIDShort     = "83599e29c455"
+	unitTestNetworkBridge    = "testdockbr0"
+	unitTestStoreBase        = "/var/lib/docker/unit-tests"
+	unitTestDockerTmpdir     = "/var/lib/docker/tmp"
+	testDaemonAddr           = "127.0.0.1:4270"
+	testDaemonProto          = "tcp"
+	testDaemonHttpsProto     = "tcp"
+	testDaemonHttpsAddr      = "localhost:4271"
+	testDaemonRogueHttpsAddr = "localhost:4272"
+)
+
+var (
+	// FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted.
+	globalDaemon           *daemon.Daemon
+	globalEngine           *engine.Engine
+	globalHttpsEngine      *engine.Engine
+	globalRogueHttpsEngine *engine.Engine
+	startFds               int
+	startGoroutines        int
+)
+
+// FIXME: nuke() is deprecated by Daemon.Nuke()
+func nuke(daemon *daemon.Daemon) error {
+	return daemon.Nuke()
+}
+
+// FIXME: cleanup and nuke are redundant.
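+// cleanup kills and destroys every container known to the daemon, then
+// deletes every image except the base test image, leaving the engine ready
+// for reuse by the next test.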
+func cleanup(eng *engine.Engine, t *testing.T) error {
+	daemon := mkDaemonFromEngine(eng, t)
+	for _, container := range daemon.List() {
+		container.Kill()
+		daemon.Destroy(container)
+	}
+	job := eng.Job("images")
+	images, err := job.Stdout.AddTable()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	for _, image := range images.Data {
+		if image.Get("Id") != unitTestImageID {
+			eng.Job("image_delete", image.Get("Id")).Run()
+		}
+	}
+	return nil
+}
+
+func layerArchive(tarfile string) (io.Reader, error) {
+	// FIXME: need to close f somewhere
+	f, err := os.Open(tarfile)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+func init() {
+	// Always use the same driver (vfs) for all integration tests.
+	// To test other drivers, we need a dedicated driver validation suite.
+	os.Setenv("DOCKER_DRIVER", "vfs")
+	os.Setenv("TEST", "1")
+	os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)
+
+	// Hack to run sys init during unit testing
+	if reexec.Init() {
+		return
+	}
+
+	if uid := syscall.Geteuid(); uid != 0 {
+		log.Fatalf("docker tests need to be run as root")
+	}
+
+	// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
+	if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
+		src, err := os.Open(dockerinit)
+		if err != nil {
+			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
+		}
+		defer src.Close()
+		dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
+		if err != nil {
+			log.Fatalf("Unable to create dockerinit in test directory: %s", err)
+		}
+		defer dst.Close()
+		if _, err := io.Copy(dst, src); err != nil {
+			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
+		}
+		dst.Close()
+		src.Close()
+	}
+
+	// Set up the base daemon, which will be duplicated for each test.
+	// (no tests are run directly in the base)
+	setupBaseImage()
+
+	// Create the "global daemon" and the long-running HTTPS daemons for integration tests
+	spawnGlobalDaemon()
+	spawnLegitHttpsDaemon()
+	spawnRogueHttpsDaemon()
+	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
+}
+
+func setupBaseImage() {
+	eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
+	job := eng.Job("image_inspect", unitTestImageName)
+	img, _ := job.Stdout.AddEnv()
+	// If the unit test image is not found, try to download it.
+	if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID {
+		// Retrieve the Image
+		job = eng.Job("pull", unitTestImageName)
+		job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout))
+		if err := job.Run(); err != nil {
+			log.Fatalf("Unable to pull the test image: %s", err)
+		}
+	}
+}
+
+func spawnGlobalDaemon() {
+	if globalDaemon != nil {
+		log.Debugf("Global daemon already exists. Skipping.")
+		return
+	}
+	t := std_log.New(os.Stderr, "", 0)
+	eng := NewTestEngine(t)
+	globalEngine = eng
+	globalDaemon = mkDaemonFromEngine(eng, t)
+
+	// Spawn a Daemon
+	go func() {
+		log.Debugf("Spawning global daemon for integration tests")
+		listenURL := &url.URL{
+			Scheme: testDaemonProto,
+			Host:   testDaemonAddr,
+		}
+		job := eng.Job("serveapi", listenURL.String())
+		job.SetenvBool("Logging", true)
+		if err := job.Run(); err != nil {
+			log.Fatalf("Unable to spawn the test daemon: %s", err)
+		}
+	}()
+
+	// Give some time to ListenAndServe to actually start
+	// FIXME: use inmem transports instead of tcp
+	time.Sleep(time.Second)
+
+	if err := eng.Job("acceptconnections").Run(); err != nil {
+		log.Fatalf("Unable to accept connections for test api: %s", err)
+	}
+}
+
+func spawnLegitHttpsDaemon() {
+	if globalHttpsEngine != nil {
+		return
+	}
+	globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem",
+		"fixtures/https/server-cert.pem", "fixtures/https/server-key.pem")
+}
+
+func spawnRogueHttpsDaemon() {
+	if globalRogueHttpsEngine != nil {
+		return
+	}
+	globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem",
+		"fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem")
+}
+
+func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
+	t := std_log.New(os.Stderr, "", 0)
+	root, err := newTestDirectory(unitTestStoreBase)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false,
+	// and we want to set it to true.
+
+	eng := newTestEngine(t, true, root)
+
+	// Spawn a Daemon
+	go func() {
+		log.Debugf("Spawning https daemon for integration tests")
+		listenURL := &url.URL{
+			Scheme: testDaemonHttpsProto,
+			Host:   addr,
+		}
+		job := eng.Job("serveapi", listenURL.String())
+		job.SetenvBool("Logging", true)
+		job.SetenvBool("Tls", true)
+		job.SetenvBool("TlsVerify", true)
+		job.Setenv("TlsCa", cacert)
+		job.Setenv("TlsCert", cert)
+		job.Setenv("TlsKey", key)
+		if err := job.Run(); err != nil {
+			log.Fatalf("Unable to spawn the test daemon: %s", err)
+		}
+	}()
+
+	// Give some time to ListenAndServe to actually start
+	time.Sleep(time.Second)
+
+	if err := eng.Job("acceptconnections").Run(); err != nil {
+		log.Fatalf("Unable to accept connections for test api: %s", err)
+	}
+	return eng
+}
+
+// FIXME: test that ImagePull(json=true) sends correct json output
+
+func GetTestImage(daemon *daemon.Daemon) *image.Image {
+	imgs, err := daemon.Graph().Map()
+	if err != nil {
+		log.Fatalf("Unable to get the test image: %s", err)
+	}
+	for _, image := range imgs {
+		if image.ID == unitTestImageID {
+			return image
+		}
+	}
+	log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs)
+	return nil
+}
+
+func TestDaemonCreate(t *testing.T) {
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+
+	// Make sure we start with 0 containers
+	if len(daemon.List()) != 0 {
+		t.Errorf("Expected 0 containers, %v found", len(daemon.List()))
+	}
+
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
+		Cmd:   []string{"ls", "-al"},
+	},
+		&runconfig.HostConfig{},
+		"",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		if err := daemon.Destroy(container); err != nil {
+			t.Error(err)
+		}
+	}()
+
+	// Make sure we can find the newly created container with List()
+	if len(daemon.List()) != 1 {
+		t.Errorf("Expected 1 container, %v found", len(daemon.List()))
+	}
+
+	// Make
sure the container List() returns is the right one + if daemon.List()[0].ID != container.ID { + t.Errorf("Unexpected container %v returned by List", daemon.List()[0]) + } + + // Make sure we can get the container with Get() + if daemon.Get(container.ID) == nil { + t.Errorf("Unable to get newly created container") + } + + // Make sure it is the right container + if daemon.Get(container.ID) != container { + t.Errorf("Get() returned the wrong container") + } + + // Make sure Exists returns it as existing + if !daemon.Exists(container.ID) { + t.Errorf("Exists() returned false for a newly created container") + } + + // Test that conflict error displays correct details + testContainer, _, _ := daemon.Create( + &runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"ls", "-al"}, + }, + &runconfig.HostConfig{}, + "conflictname", + ) + if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { + t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error()) + } + + // Make sure create with bad parameters returns an error + if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil { + t.Fatal("Builder.Create should throw an error when Cmd is missing") + } + + if _, _, err := daemon.Create( + &runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{}, + }, + &runconfig.HostConfig{}, + "", + ); err == nil { + t.Fatal("Builder.Create should throw an error when Cmd is empty") + } + + config := &runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"/bin/ls"}, + PortSpecs: []string{"80"}, + } + container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "") + + _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config) + if err != nil { + t.Error(err) + } + + // test expose 80:8000 + container, warnings, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"ls", "-al"}, + PortSpecs: []string{"80:8000"}, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + t.Fatal(err) + } + if warnings == nil || len(warnings) != 1 { + t.Error("Expected a warning, got none") + } +} + +func TestDestroy(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"ls", "-al"}, + }, + &runconfig.HostConfig{}, + "") + if err != nil { + t.Fatal(err) + } + // Destroy + if err := daemon.Destroy(container); err != nil { + t.Error(err) + } + + // Make sure daemon.Exists() behaves correctly + if daemon.Exists("test_destroy") { + t.Errorf("Exists() returned true") + } + + // Make sure daemon.List() doesn't list the destroyed container + if len(daemon.List()) != 0 { + t.Errorf("Expected 0 container, %v found", len(daemon.List())) + } + + // Make sure daemon.Get() refuses to return the unexisting container + if daemon.Get(container.ID) != nil { + t.Errorf("Unable to get newly created container") + } + + // Test double destroy + if err := daemon.Destroy(container); err == nil { + // It should have failed + t.Errorf("Double destroy did not fail") + } +} + +func TestGet(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + + container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) + defer daemon.Destroy(container1) + + 
container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) + defer daemon.Destroy(container2) + + container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) + defer daemon.Destroy(container3) + + if daemon.Get(container1.ID) != container1 { + t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1) + } + + if daemon.Get(container2.ID) != container2 { + t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2) + } + + if daemon.Get(container3.ID) != container3 { + t.Errorf("Get(test3) returned %v while expecting %v", daemon.Get(container3.ID), container3) + } + +} + +func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { + var ( + err error + id string + outputBuffer = bytes.NewBuffer(nil) + strPort string + eng = NewTestEngine(t) + daemon = mkDaemonFromEngine(eng, t) + port = 5554 + p nat.Port + ) + defer func() { + if err != nil { + daemon.Nuke() + } + }() + + for { + port += 1 + strPort = strconv.Itoa(port) + var cmd string + if proto == "tcp" { + cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat" + } else if proto == "udp" { + cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat" + } else { + t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) + } + ep := make(map[nat.Port]struct{}, 1) + p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) + ep[p] = struct{}{} + + jobCreate := eng.Job("create") + jobCreate.Setenv("Image", unitTestImageID) + jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) + jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) + jobCreate.SetenvJson("ExposedPorts", ep) + jobCreate.Stdout.Add(outputBuffer) + if err := jobCreate.Run(); err != nil { + t.Fatal(err) + } + id = engine.Tail(outputBuffer, 1) + // FIXME: this relies on the undocumented behavior of daemon.Create + // which will return a nil error AND container if the exposed ports + // are invalid. That behavior should be fixed! 
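+		// In the meantime: a non-empty id means the create succeeded, while an
+		// empty id means the port was rejected, so we retry with the next one.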
+		if id != "" {
+			break
+		}
+		t.Logf("Port %v already in use, trying another one", strPort)
+
+	}
+
+	jobStart := eng.Job("start", id)
+	portBindings := make(map[nat.Port][]nat.PortBinding)
+	portBindings[p] = []nat.PortBinding{
+		{},
+	}
+	if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
+		t.Fatal(err)
+	}
+	if err := jobStart.Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	container := daemon.Get(id)
+	if container == nil {
+		t.Fatalf("Couldn't fetch test container %s", id)
+	}
+
+	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
+		for !container.IsRunning() {
+			time.Sleep(10 * time.Millisecond)
+		}
+	})
+
+	// Even if the state is running, let's give lxc some time to spawn the process
+	container.WaitStop(500 * time.Millisecond)
+
+	strPort = container.NetworkSettings.Ports[p][0].HostPort
+	return daemon, container, strPort
+}
+
+// Run a container with a TCP port allocated, and test that it can receive connections on localhost
+func TestAllocateTCPPortLocalhost(t *testing.T) {
+	daemon, container, port := startEchoServerContainer(t, "tcp")
+	defer nuke(daemon)
+	defer container.Kill()
+
+	for i := 0; i != 10; i++ {
+		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer conn.Close()
+
+		input := bytes.NewBufferString("well hello there\n")
+		_, err = conn.Write(input.Bytes())
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := make([]byte, 16)
+		read := 0
+		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
+		read, err = conn.Read(buf)
+		if err != nil {
+			if err, ok := err.(*net.OpError); ok {
+				if err.Err == syscall.ECONNRESET {
+					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
+					conn.Close()
+					time.Sleep(time.Second)
+					continue
+				}
+				if err.Timeout() {
+					t.Log("Timeout, trying again")
+					conn.Close()
+					continue
+				}
+			}
+			t.Fatal(err)
+		}
+		output := string(buf[:read])
+		if !strings.Contains(output, "well hello there") {
+			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
+		} else {
+			return
+		}
+	}
+
+	t.Fatal("No reply from the container")
+}
+
+// Run a container with a UDP port allocated, and test that it can receive connections on localhost
+func TestAllocateUDPPortLocalhost(t *testing.T) {
+	daemon, container, port := startEchoServerContainer(t, "udp")
+	defer nuke(daemon)
+	defer container.Kill()
+
+	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer conn.Close()
+
+	input := bytes.NewBufferString("well hello there\n")
+	buf := make([]byte, 16)
+	// Try for a minute; for some reason the select in socat may take ages
+	// to return even though everything on the path seems fine (i.e: the
+	// UDPProxy forwards the traffic correctly and you can see the packets
+	// on the interface from within the container).
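+	// Each attempt below waits up to 500ms for the echo to come back, so 120
+	// attempts bound the total wait at roughly a minute.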
+	for i := 0; i != 120; i++ {
+		_, err := conn.Write(input.Bytes())
+		if err != nil {
+			t.Fatal(err)
+		}
+		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
+		read, err := conn.Read(buf)
+		if err == nil {
+			output := string(buf[:read])
+			if strings.Contains(output, "well hello there") {
+				return
+			}
+		}
+	}
+
+	t.Fatal("No reply from the container")
+}
+
+func TestRestore(t *testing.T) {
+	eng := NewTestEngine(t)
+	daemon1 := mkDaemonFromEngine(eng, t)
+	defer daemon1.Nuke()
+	// Create a container with one instance of docker
+	container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t)
+	defer daemon1.Destroy(container1)
+
+	// Create a second container meant to be killed
+	container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
+	defer daemon1.Destroy(container2)
+
+	// Start the container non blocking
+	if err := container2.Start(); err != nil {
+		t.Fatal(err)
+	}
+
+	if !container2.IsRunning() {
+		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
+	}
+
+	// Simulate a crash/manual quit of dockerd: process dies, state stays 'Running'
+	cStdin, _ := container2.StdinPipe()
+	cStdin.Close()
+	if _, err := container2.WaitStop(2 * time.Second); err != nil {
+		t.Fatal(err)
+	}
+	container2.SetRunning(42)
+	container2.ToDisk()
+
+	if len(daemon1.List()) != 2 {
+		t.Errorf("Expected 2 containers, %v found", len(daemon1.List()))
+	}
+	if err := container1.Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	if !container2.IsRunning() {
+		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
+	}
+
+	// Here we are simulating a docker restart - that is, reloading all containers
+	// from scratch
+	eng = newTestEngine(t, false, daemon1.Config().Root)
+	daemon2 := mkDaemonFromEngine(eng, t)
+	if len(daemon2.List()) != 2 {
+		t.Errorf("Expected 2 containers, %v found", len(daemon2.List()))
+	}
+	runningCount := 0
+	for _, c := range daemon2.List() {
+		if c.IsRunning() {
+			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
+			runningCount++
+		}
+	}
+	if runningCount != 0 {
+		t.Fatalf("Expected 0 containers alive, %d found", runningCount)
+	}
+	container3 := daemon2.Get(container1.ID)
+	if container3 == nil {
+		t.Fatal("Unable to Get container")
+	}
+	if err := container3.Run(); err != nil {
+		t.Fatal(err)
+	}
+	container2.SetStopped(0)
+}
+
+func TestDefaultContainerName(t *testing.T) {
+	eng := NewTestEngine(t)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)
+
+	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
+	containerID := container.ID
+
+	if container.Name != "/some_name" {
+		t.Fatalf("Expect /some_name got %s", container.Name)
+	}
+
+	if c := daemon.Get("/some_name"); c == nil {
+		t.Fatalf("Couldn't retrieve test container as /some_name")
+	} else if c.ID != containerID {
+		t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
+	}
+}
+
+func TestRandomContainerName(t *testing.T) {
+	eng := NewTestEngine(t)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)
+
+	config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	container := daemon.Get(createTestContainer(eng, config, t))
+	containerID := container.ID
+
+	if container.Name == "" {
+		t.Fatalf("Expected non-empty container name")
+	}
+
+	if c := daemon.Get(container.Name); c == nil {
+		log.Fatalf("Could not
lookup container %s by its name", container.Name) + } else if c.ID != containerID { + log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) + } +} + +func TestContainerNameValidation(t *testing.T) { + eng := NewTestEngine(t) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) + + for _, test := range []struct { + Name string + Valid bool + }{ + {"abc-123_AAA.1", true}, + {"\000asdf", false}, + } { + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + if err != nil { + if !test.Valid { + continue + } + t.Fatal(err) + } + + var outputBuffer = bytes.NewBuffer(nil) + job := eng.Job("create", test.Name) + if err := job.ImportEnv(config); err != nil { + t.Fatal(err) + } + job.Stdout.Add(outputBuffer) + if err := job.Run(); err != nil { + if !test.Valid { + continue + } + t.Fatal(err) + } + + container := daemon.Get(engine.Tail(outputBuffer, 1)) + + if container.Name != "/"+test.Name { + t.Fatalf("Expect /%s got %s", test.Name, container.Name) + } + + if c := daemon.Get("/" + test.Name); c == nil { + t.Fatalf("Couldn't retrieve test container as /%s", test.Name) + } else if c.ID != container.ID { + t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) + } + } + +} + +func TestLinkChildContainer(t *testing.T) { + eng := NewTestEngine(t) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) + + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) + + webapp, err := daemon.GetByName("/webapp") + if err != nil { + t.Fatal(err) + } + + if webapp.ID != container.ID { + t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) + } + + config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + childContainer := daemon.Get(createTestContainer(eng, config, t)) + + if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { + t.Fatal(err) + } + + // Get the child by it's new name + db, err := daemon.GetByName("/webapp/db") + if err != nil { + t.Fatal(err) + } + if db.ID != childContainer.ID { + t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID) + } +} + +func TestGetAllChildren(t *testing.T) { + eng := NewTestEngine(t) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) + + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) + + webapp, err := daemon.GetByName("/webapp") + if err != nil { + t.Fatal(err) + } + + if webapp.ID != container.ID { + t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) + } + + config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + childContainer := daemon.Get(createTestContainer(eng, config, t)) + + if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { + t.Fatal(err) + } + + children, err := daemon.Children("/webapp") + if err != nil { + t.Fatal(err) + } + + if children == nil { + t.Fatal("Children should not be nil") + } + if len(children) == 0 { + t.Fatal("Children should not be empty") + } + + for key, value := range children { + if key != "/webapp/db" { + t.Fatalf("Expected /webapp/db got %s", key) + } + if value.ID != 
childContainer.ID {
+			t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID)
+		}
+	}
+}
+
+func TestDestroyWithInitLayer(t *testing.T) {
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
+		Cmd:   []string{"ls", "-al"},
+	},
+		&runconfig.HostConfig{},
+		"")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Destroy
+	if err := daemon.Destroy(container); err != nil {
+		t.Fatal(err)
+	}
+
+	// Make sure daemon.Exists() behaves correctly
+	if daemon.Exists("test_destroy") {
+		t.Fatalf("Exists() returned true")
+	}
+
+	// Make sure daemon.List() doesn't list the destroyed container
+	if len(daemon.List()) != 0 {
+		t.Fatalf("Expected 0 containers, %v found", len(daemon.List()))
+	}
+
+	driver := daemon.Graph().Driver()
+
+	// Make sure that the container does not exist in the driver
+	if _, err := driver.Get(container.ID, ""); err == nil {
+		t.Fatal("Container should not exist in the driver")
+	}
+
+	// Make sure that the init layer is removed from the driver
+	if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil {
+		t.Fatal("Container's init layer should not exist in the driver")
+	}
+}
diff --git a/integration/server_test.go b/integration/server_test.go
new file mode 100644
index 00000000..a9039995
--- /dev/null
+++ b/integration/server_test.go
@@ -0,0 +1,295 @@
+package docker
+
+import (
+	"bytes"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/engine"
+)
+
+func TestCreateNumberHostname(t *testing.T) {
+	eng := NewTestEngine(t)
+	defer mkDaemonFromEngine(eng, t).Nuke()
+
+	config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	createTestContainer(eng, config, t)
+}
+
+func TestCommit(t *testing.T) {
+	eng := NewTestEngine(t)
+	defer mkDaemonFromEngine(eng, t).Nuke()
+
+	config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	id := createTestContainer(eng, config, t)
+
+	job := eng.Job("commit", id)
+	job.Setenv("repo", "testrepo")
+	job.Setenv("tag", "testtag")
+	job.SetenvJson("config", config)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMergeConfigOnCommit(t *testing.T) {
+	eng := NewTestEngine(t)
+	runtime := mkDaemonFromEngine(eng, t)
+	defer runtime.Nuke()
+
+	container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t)
+	defer runtime.Destroy(container1)
+
+	config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}, nil)
+	if err != nil {
+		t.Error(err)
+	}
+
+	job := eng.Job("commit", container1.ID)
+	job.Setenv("repo", "testrepo")
+	job.Setenv("tag", "testtag")
+	job.SetenvJson("config", config)
+	var outputBuffer = bytes.NewBuffer(nil)
+	job.Stdout.Add(outputBuffer)
+	if err := job.Run(); err != nil {
+		t.Error(err)
+	}
+
+	container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t)
+	defer runtime.Destroy(container2)
+
+	job = eng.Job("container_inspect", container1.Name)
+	baseContainer, _ := job.Stdout.AddEnv()
+	if err := job.Run(); err != nil {
+		t.Error(err)
+	}
+
+	job = eng.Job("container_inspect", container2.Name)
+	commitContainer, _ := job.Stdout.AddEnv()
+	if err := job.Run(); err != nil {
+		t.Error(err)
+	}
+
+	baseConfig := baseContainer.GetSubEnv("Config")
+	commitConfig := commitContainer.GetSubEnv("Config")
+
+	if commitConfig.Get("Env") != baseConfig.Get("Env") {
+		t.Fatalf("Env config in committed container
should be %v, was %v", + baseConfig.Get("Env"), commitConfig.Get("Env")) + } + + if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" { + t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s", + baseConfig.Get("Cmd")) + } + + if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" { + t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s", + commitConfig.Get("Cmd")) + } +} + +func TestRestartKillWait(t *testing.T) { + eng := NewTestEngine(t) + runtime := mkDaemonFromEngine(eng, t) + defer runtime.Nuke() + + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + if err != nil { + t.Fatal(err) + } + + id := createTestContainer(eng, config, t) + + job := eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("start", id) + if err := job.ImportEnv(hostConfig); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + job = eng.Job("kill", id) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + eng = newTestEngine(t, false, runtime.Config().Root) + + job = eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() { + job = eng.Job("wait", outs.Data[0].Get("Id")) + if err := job.Run(); err != nil { + t.Fatal(err) + } + }) +} + +func TestCreateStartRestartStopStartKillRm(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + if err != nil { + t.Fatal(err) + } + + id := createTestContainer(eng, config, t) + + job := eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("start", id) + if err := job.ImportEnv(hostConfig); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("restart", id) + job.SetenvInt("t", 2) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("stop", id) + job.SetenvInt("t", 2) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("start", id) + if err := job.ImportEnv(hostConfig); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if err := eng.Job("kill", id).Run(); err != nil { + t.Fatal(err) + } + + // FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty") + job = eng.Job("rm", id) + job.SetenvBool("removeVolume", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 0 { + t.Errorf("Expected 0 container, %v found", len(outs.Data)) + } +} + +func 
TestRunWithTooLowMemoryLimit(t *testing.T) {
+	eng := NewTestEngine(t)
+	defer mkDaemonFromEngine(eng, t).Nuke()
+
+	// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
+	job := eng.Job("create")
+	job.Setenv("Image", unitTestImageID)
+	job.Setenv("Memory", "524287")
+	job.Setenv("CpuShares", "1000")
+	job.SetenvList("Cmd", []string{"/bin/cat"})
+	if err := job.Run(); err == nil {
+		t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
+	}
+}
+
+func TestImagesFilter(t *testing.T) {
+	eng := NewTestEngine(t)
+	defer nuke(mkDaemonFromEngine(eng, t))
+
+	if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	images := getImages(eng, t, false, "utest*/*")
+
+	if len(images.Data[0].GetList("RepoTags")) != 2 {
+		t.Fatal("incorrect number of matches returned")
+	}
+
+	images = getImages(eng, t, false, "utest")
+
+	if len(images.Data[0].GetList("RepoTags")) != 1 {
+		t.Fatal("incorrect number of matches returned")
+	}
+
+	images = getImages(eng, t, false, "utest*")
+
+	if len(images.Data[0].GetList("RepoTags")) != 1 {
+		t.Fatal("incorrect number of matches returned")
+	}
+
+	images = getImages(eng, t, false, "*5000*/*")
+
+	if len(images.Data[0].GetList("RepoTags")) != 1 {
+		t.Fatal("incorrect number of matches returned")
+	}
+}
diff --git a/integration/utils_test.go b/integration/utils_test.go
new file mode 100644
index 00000000..e1abfa72
--- /dev/null
+++ b/integration/utils_test.go
@@ -0,0 +1,360 @@
+package docker
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/builtins"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+// This file contains utility functions for docker's unit test suite.
+// It has to be named XXX_test.go, apparently, in order to access private functions
+// from other XXX_test.go functions.
+
+// Create a temporary daemon suitable for unit testing.
+// Call t.Fatal() at the first error.
+func mkDaemon(f log.Fataler) *daemon.Daemon {
+	eng := newTestEngine(f, false, "")
+	return mkDaemonFromEngine(eng, f)
+	// FIXME:
+	// [...]
+	// Mtu: docker.GetDefaultNetworkMtu(),
+	// [...]
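+	// (The commented-out fields above sit after the return statement and are
+	// kept only as a reminder of daemon configuration this helper does not set.)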
+}
+
+func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler, name string) (shortId string) {
+	job := eng.Job("create", name)
+	if err := job.ImportEnv(config); err != nil {
+		f.Fatal(err)
+	}
+	var outputBuffer = bytes.NewBuffer(nil)
+	job.Stdout.Add(outputBuffer)
+	if err := job.Run(); err != nil {
+		f.Fatal(err)
+	}
+	return engine.Tail(outputBuffer, 1)
+}
+
+func createTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler) (shortId string) {
+	return createNamedTestContainer(eng, config, f, "")
+}
+
+func startContainer(eng *engine.Engine, id string, t log.Fataler) {
+	job := eng.Job("start", id)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func containerRun(eng *engine.Engine, id string, t log.Fataler) {
+	startContainer(eng, id, t)
+	containerWait(eng, id, t)
+}
+
+func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool {
+	c := getContainer(eng, id, t)
+	if err := c.Mount(); err != nil {
+		t.Fatal(err)
+	}
+	defer c.Unmount()
+	if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+		t.Fatal(err)
+	}
+	return true
+}
+
+func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteCloser, io.ReadCloser) {
+	c := getContainer(eng, id, t)
+	i, err := c.StdinPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	o, err := c.StdoutPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return i, o
+}
+
+func containerWait(eng *engine.Engine, id string, t log.Fataler) int {
+	ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second)
+	return ex
+}
+
+func containerWaitTimeout(eng *engine.Engine, id string, t log.Fataler) error {
+	_, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond)
+	return err
+}
+
+func containerKill(eng *engine.Engine, id string, t log.Fataler) {
+	if err := eng.Job("kill", id).Run(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func containerRunning(eng *engine.Engine, id string, t log.Fataler) bool {
+	return getContainer(eng, id, t).IsRunning()
+}
+
+func containerAssertExists(eng *engine.Engine, id string, t log.Fataler) {
+	getContainer(eng, id, t)
+}
+
+func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) {
+	daemon := mkDaemonFromEngine(eng, t)
+	if c := daemon.Get(id); c != nil {
+		t.Fatal(fmt.Errorf("Container %s should not exist", id))
+	}
+}
+
+// assertHttpNotError expects the given response to not have an error.
+// Otherwise it causes the test to fail.
+func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) {
+	// Non-error http status are [200, 400)
+	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
+		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
+	}
+}
+
+// assertHttpError expects the given response to have an error.
+// Otherwise it causes the test to fail.
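+// Any status in [200, 400) counts as success here, so this helper fails the
+// test unless the recorded code falls outside that range.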
+func assertHttpError(r *httptest.ResponseRecorder, t log.Fataler) { + // Non-error http status are [200, 400) + if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { + t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) + } +} + +func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Container { + daemon := mkDaemonFromEngine(eng, t) + c := daemon.Get(id) + if c == nil { + t.Fatal(fmt.Errorf("No such container: %s", id)) + } + return c +} + +func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon { + iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon") + if iDaemon == nil { + panic("Legacy daemon field not set in engine") + } + daemon, ok := iDaemon.(*daemon.Daemon) + if !ok { + panic("Legacy daemon field in engine does not cast to *daemon.Daemon") + } + return daemon +} + +func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine { + if root == "" { + if dir, err := newTestDirectory(unitTestStoreBase); err != nil { + t.Fatal(err) + } else { + root = dir + } + } + os.MkdirAll(root, 0700) + + eng := engine.New() + eng.Logging = false + // Load default plugins + builtins.Register(eng) + // (This is manually copied and modified from main() until we have a more generic plugin system) + cfg := &daemon.Config{ + Root: root, + AutoRestart: autorestart, + ExecDriver: "native", + // Either InterContainerCommunication or EnableIptables must be set, + // otherwise NewDaemon will fail because of conflicting settings. + InterContainerCommunication: true, + } + d, err := daemon.NewDaemon(cfg, eng) + if err != nil { + t.Fatal(err) + } + if err := d.Install(eng); err != nil { + t.Fatal(err) + } + return eng +} + +func NewTestEngine(t log.Fataler) *engine.Engine { + return newTestEngine(t, false, "") +} + +func newTestDirectory(templateDir string) (dir string, err error) { + return utils.TestDirectory(templateDir) +} + +func getCallerName(depth int) string { + return utils.GetCallerName(depth) +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Call t.Fatal() at the first error. +func writeFile(dst, content string, t *testing.T) { + // Create subdirectories if necessary + if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { + t.Fatal(err) + } + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + t.Fatal(err) + } + // Write content (truncate if it exists) + if _, err := io.Copy(f, strings.NewReader(content)); err != nil { + t.Fatal(err) + } +} + +// Return the contents of file at path `src`. +// Call t.Fatal() at the first error (including if the file doesn't exist) +func readFile(src string, t *testing.T) (content string) { + f, err := os.Open(src) + if err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + return string(data) +} + +// Create a test container from the given daemon `r` and run arguments `args`. +// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is +// dynamically replaced by the current test image. +// The caller is responsible for destroying the container. +// Call t.Fatal() at the first error. 
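+// Example (as used in TestRestore): mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
+// builds an interactive /bin/cat container from the current test image.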
+func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) { + config, hc, _, err := parseRun(args, nil) + defer func() { + if err != nil && t != nil { + t.Fatal(err) + } + }() + if err != nil { + return nil, nil, err + } + if config.Image == "_" { + config.Image = GetTestImage(r).ID + } + c, _, err := r.Create(config, nil, "") + if err != nil { + return nil, nil, err + } + // NOTE: hostConfig is ignored. + // If `args` specify privileged mode, custom lxc conf, external mount binds, + // port redirects etc. they will be ignored. + // This is because the correct way to set these things is to pass environment + // to the `start` job. + // FIXME: this helper function should be deprecated in favor of calling + // `create` and `start` jobs directly. + return c, hc, nil +} + +// Create a test container, start it, wait for it to complete, destroy it, +// and return its standard output as a string. +// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. +// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. +func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) { + defer func() { + if err != nil && t != nil { + t.Fatal(err) + } + }() + container, hc, err := mkContainer(r, args, t) + if err != nil { + return "", err + } + defer r.Destroy(container) + stdout, err := container.StdoutPipe() + if err != nil { + return "", err + } + defer stdout.Close() + + job := eng.Job("start", container.ID) + if err := job.ImportEnv(hc); err != nil { + return "", err + } + if err := job.Run(); err != nil { + return "", err + } + + container.WaitStop(-1 * time.Second) + data, err := ioutil.ReadAll(stdout) + if err != nil { + return "", err + } + output = string(data) + return +} + +// FIXME: this is duplicated from graph_test.go in the docker package. 
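+// fakeTar returns a minimal in-memory tar stream containing a few small
+// files, enough to stand in for an image layer wherever an archive is needed.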
+func fakeTar() (io.ReadCloser, error) { + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return ioutil.NopCloser(buf), nil +} + +func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table { + return getImages(eng, t, true, "") +} + +func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engine.Table { + job := eng.Job("images") + job.SetenvBool("all", all) + job.Setenv("filter", filter) + images, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + return images + +} + +func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return runconfig.Parse(cmd, args, sysInfo) +} diff --git a/integration/z_final_test.go b/integration/z_final_test.go new file mode 100644 index 00000000..ad1eb434 --- /dev/null +++ b/integration/z_final_test.go @@ -0,0 +1,17 @@ +package docker + +import ( + "github.com/docker/docker/utils" + "runtime" + "testing" +) + +func displayFdGoroutines(t *testing.T) { + t.Logf("Fds: %d, Goroutines: %d", utils.GetTotalUsedFds(), runtime.NumGoroutine()) +} + +func TestFinal(t *testing.T) { + nuke(globalDaemon) + t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, startGoroutines) + displayFdGoroutines(t) +} diff --git a/links/links.go b/links/links.go new file mode 100644 index 00000000..d2d69939 --- /dev/null +++ b/links/links.go @@ -0,0 +1,137 @@ +package links + +import ( + "fmt" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" + "path" + "strings" +) + +type Link struct { + ParentIP string + ChildIP string + Name string + ChildEnvironment []string + Ports []nat.Port + IsEnabled bool + eng *engine.Engine +} + +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) { + + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + l := &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + eng: eng, + } + return l, nil + +} + +func (l *Link) Alias() string { + _, alias := path.Split(l.Name) + return alias +} + +func (l *Link) ToEnv() []string { + env := []string{} + alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + // Load exposed ports into the environment + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's 
name into the environment
+	env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name))
+
+	if l.ChildEnvironment != nil {
+		for _, v := range l.ChildEnvironment {
+			parts := strings.Split(v, "=")
+			if len(parts) != 2 {
+				continue
+			}
+			// Ignore a few variables that are added during docker build (and not really relevant to linked containers)
+			if parts[0] == "HOME" || parts[0] == "PATH" {
+				continue
+			}
+			env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1]))
+		}
+	}
+	return env
+}
+
+// Default port rules
+func (l *Link) getDefaultPort() *nat.Port {
+	var p nat.Port
+	i := len(l.Ports)
+
+	if i == 0 {
+		return nil
+	} else if i > 1 {
+		nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
+			// If the two ports have the same number, tcp takes priority
+			// Sort in desc order
+			return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp")
+		})
+	}
+	p = l.Ports[0]
+	return &p
+}
+
+func (l *Link) Enable() error {
+	if err := l.toggle("-I", false); err != nil {
+		return err
+	}
+	l.IsEnabled = true
+	return nil
+}
+
+func (l *Link) Disable() {
+	// We do not care about errors here because the link may not
+	// exist in iptables
+	l.toggle("-D", true)
+
+	l.IsEnabled = false
+}
+
+func (l *Link) toggle(action string, ignoreErrors bool) error {
+	job := l.eng.Job("link", action)
+
+	job.Setenv("ParentIP", l.ParentIP)
+	job.Setenv("ChildIP", l.ChildIP)
+	job.SetenvBool("IgnoreErrors", ignoreErrors)
+
+	out := make([]string, len(l.Ports))
+	for i, p := range l.Ports {
+		out[i] = fmt.Sprintf("%s/%s", p.Port(), p.Proto())
+	}
+	job.SetenvList("Ports", out)
+
+	if err := job.Run(); err != nil {
+		// TODO: get output from job
+		return err
+	}
+	return nil
+}
diff --git a/links/links_test.go b/links/links_test.go
new file mode 100644
index 00000000..c26559e5
--- /dev/null
+++ b/links/links_test.go
@@ -0,0 +1,109 @@
+package links
+
+import (
+	"github.com/docker/docker/nat"
+	"strings"
+	"testing"
+)
+
+func TestLinkNaming(t *testing.T) {
+	ports := make(nat.PortSet)
+	ports[nat.Port("6379/tcp")] = struct{}{}
+
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rawEnv := link.ToEnv()
+	env := make(map[string]string, len(rawEnv))
+	for _, e := range rawEnv {
+		parts :=
strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} diff --git a/nat/nat.go b/nat/nat.go new file mode 100644 index 00000000..b0177289 --- /dev/null +++ b/nat/nat.go @@ -0,0 +1,159 @@ +package nat + +// nat is a convenience package for docker's manipulation of strings describing +// network ports. + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/docker/docker/pkg/parsers" +) + +const ( + PortSpecTemplate = "ip:hostPort:containerPort" + PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort" +) + +type PortBinding struct { + HostIp string + HostPort string +} + +type PortMap map[Port][]PortBinding + +type PortSet map[Port]struct{} + +// 80/tcp +type Port string + +func NewPort(proto, port string) Port { + return Port(fmt.Sprintf("%s/%s", port, proto)) +} + +func ParsePort(rawPort string) (int, error) { + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +func (p Port) Proto() string { + parts := strings.Split(string(p), "/") + if len(parts) == 1 { + return "tcp" + } + return parts[1] +} + +func (p Port) Port() string { + return strings.Split(string(p), "/")[0] +} + +func (p Port) Int() int { + i, err := ParsePort(p.Port()) + if err != nil { + panic(err) + } + return i +} + +// Splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + var port string + var proto string + + parts := strings.Split(rawPort, "/") + + if len(parts) == 0 || parts[0] == "" { // we have "" or ""/ + port = "" + proto = "" + } else { // we have # or #/ or #/... + port = parts[0] + if len(parts) > 1 && parts[1] != "" { + proto = parts[1] // we have #/... 
+ } else { + proto = "tcp" // we have # or #/ + } + } + return proto, port +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// We will receive port specs in the format of ip:public:private/proto and these need to be +// parsed in the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := "tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := parsers.PartParser(PortSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIp = parts["ip"] + hostPort = parts["hostPort"] + ) + + if rawIp != "" && net.ParseIP(rawIp) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIp) + } + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + + if !validateProto(proto) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + + port := NewPort(proto, containerPort) + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIp: rawIp, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + return exposedPorts, bindings, nil +} diff --git a/nat/nat_test.go b/nat/nat_test.go new file mode 100644 index 00000000..a8c2cb58 --- /dev/null +++ b/nat/nat_test.go @@ -0,0 +1,201 @@ +package nat + +import ( + "testing" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. 
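+// For reference, an illustrative call to ParsePortSpecs (defined earlier in
+// this package) covering the three accepted spec shapes:
+//
+//	exposed, bindings, err := ParsePortSpecs([]string{
+//		"6379",                  // containerPort only
+//		"8080:80",               // hostPort:containerPort (proto defaults to tcp)
+//		"127.0.0.1:2222:22/tcp", // ip:hostPort:containerPort
+//	})
+//	// err is nil; exposed holds "6379/tcp", "80/tcp" and "22/tcp";
+//	// bindings["80/tcp"][0] is {HostIp: "", HostPort: "8080"} and
+//	// bindings["22/tcp"][0] is {HostIp: "127.0.0.1", HostPort: "2222"}.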
+ // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestPort(t *testing.T) { + p := NewPort("tcp", "1234") + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results") + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'") + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "" { + t.Fatalf("HostIp should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "" { + t.Fatalf("HostIp should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, 
bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "0.0.0.0" { + t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} diff --git a/nat/sort.go b/nat/sort.go new file mode 100644 index 00000000..f36c12f7 --- /dev/null +++ b/nat/sort.go @@ -0,0 +1,28 @@ +package nat + +import "sort" + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} diff --git a/nat/sort_test.go b/nat/sort_test.go new file mode 100644 index 00000000..5d490e32 --- /dev/null +++ b/nat/sort_test.go @@ -0,0 +1,41 @@ +package nat + +import ( + "fmt" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} diff --git a/opts/envfile.go b/opts/envfile.go new file mode 100644 index 00000000..19ee8955 --- /dev/null +++ b/opts/envfile.go @@ -0,0 +1,54 @@ +package opts + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +/* +Read in a line delimited file with environment variables enumerated +*/ +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + line := scanner.Text() + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + if strings.Contains(line, "=") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. 
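+// For illustration: if the calling process has TERM=xterm set, an
+// env-file line consisting of just "TERM" becomes "TERM=xterm" via the
+// os.Getenv lookup below, while a line such as "PASSWORD=secret" is
+// kept verbatim by the branch above.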
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, nil +} + +var whiteSpaces = " \t" + +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/opts/ip.go b/opts/ip.go new file mode 100644 index 00000000..f8a493e6 --- /dev/null +++ b/opts/ip.go @@ -0,0 +1,31 @@ +package opts + +import ( + "fmt" + "net" +) + +type IpOpt struct { + *net.IP +} + +func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt { + o := &IpOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +func (o *IpOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + (*o.IP) = net.ParseIP(val) + return nil +} + +func (o *IpOpt) String() string { + return (*o.IP).String() +} diff --git a/opts/opts.go b/opts/opts.go new file mode 100644 index 00000000..4ca7ec58 --- /dev/null +++ b/opts/opts.go @@ -0,0 +1,229 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/api" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +func ListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, nil), names, usage) +} + +func HostListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, api.ValidateHost), names, usage) +} + +func IPListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateIPAddress), names, usage) +} + +func DnsSearchListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage) +} + +func IPVar(value *net.IP, names []string, defaultValue, usage string) { + flag.Var(NewIpOpt(value, defaultValue), names, usage) +} + +func MirrorListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateMirror), names, usage) +} + +// ListOpts type +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *newListOptsRef(&values, validator) +} + +func newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and add it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete remove the given element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +// FIXME: can we remove this? 
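+// For example, values of ["a", "b", "a"] come back as the two map keys
+// "a" and "b", collapsing the duplicate.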
+func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values' slice. +// FIXME: Can we remove this? +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// Get checks the existence of the given key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Validators +type ValidatorFctType func(val string) (string, error) + +func ValidateAttach(val string) (string, error) { + s := strings.ToLower(val) + for _, str := range []string{"stdin", "stdout", "stderr"} { + if s == str { + return s, nil + } + } + return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR.") +} + +func ValidateLink(val string) (string, error) { + if _, err := parsers.PartParser("name:alias", val); err != nil { + return val, err + } + return val, nil +} + +func ValidatePath(val string) (string, error) { + var containerPath string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + + splited := strings.SplitN(val, ":", 2) + if len(splited) == 1 { + containerPath = splited[0] + val = filepath.Clean(splited[0]) + } else { + containerPath = splited[1] + val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) + } + + if !filepath.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if len(arr) > 1 { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// Validates domain for resolvconf search configuration. +// A zero length domain is represented by . +func ValidateDnsSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." 
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +func ValidateExtraHost(val string) (string, error) { + arr := strings.Split(val, ":") + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %s", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("bad format for add-host: %s", val) + } + return val, nil +} + +// Validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil +} diff --git a/opts/opts_test.go b/opts/opts_test.go new file mode 100644 index 00000000..09b5aa78 --- /dev/null +++ b/opts/opts_test.go @@ -0,0 +1,90 @@ +package opts + +import ( + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestListOpts(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + o.String() +} + +func TestValidateDnsSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + } + + for _, domain := range valid { + if ret, err := ValidateDnsSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDnsSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} diff --git a/pkg/README.md b/pkg/README.md new file mode 100644 index 00000000..c4b78a8a --- /dev/null +++ b/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. 
+ +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/pkg/archive/MAINTAINERS b/pkg/archive/MAINTAINERS new file mode 100644 index 00000000..2aac7265 --- /dev/null +++ b/pkg/archive/MAINTAINERS @@ -0,0 +1,2 @@ +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) diff --git a/pkg/archive/README.md b/pkg/archive/README.md new file mode 100644 index 00000000..7307d969 --- /dev/null +++ b/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go new file mode 100644 index 00000000..74c60145 --- /dev/null +++ b/pkg/archive/archive.go @@ -0,0 +1,754 @@ +package archive + +import ( + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + Archive io.ReadCloser + ArchiveReader io.Reader + Compression int + TarOptions struct { + Includes []string + Excludes []string + Compression Compression + NoLchown bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. 
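+	// (For instance, Unpack below rejects a tar entry whose cleaned path,
+	// such as "../victim/file", resolves outside of the destination
+	// directory, returning a breakoutError instead of writing the file.)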
+ breakoutError error +) + +var ( + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} +) + +const ( + Uncompressed Compression = iota + Bzip2 + Gzip + Xz +) + +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + log.Debugf("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return CmdStream(exec.Command(args[0], args[1:]...), archive) +} + +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err + } + log.Debugf("[tar autodetect] n: %v", bs) + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + + if fi.IsDir() && !strings.HasSuffix(name, "/") { + name = name + "/" + } + + hdr.Name = name + + stat, ok := fi.Sys().(*syscall.Stat_t) + if ok { + // Currently go does not fill in the 
major/minors + if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || + stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + hdr.Devmajor = int64(major(uint64(stat.Rdev))) + hdr.Devminor = int64(minor(uint64(stat.Rdev))) + } + + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := tw.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + twBuf.Reset(tw) + _, err = io.Copy(twBuf, file) + file.Close() + if err != nil { + return err + } + err = twBuf.Flush() + if err != nil { + return err + } + twBuf.Reset(nil) + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + log.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { + return err + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and + if hdr.Typeflag != tar.TypeSymlink { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } else { + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +func escapeName(name string) string { + escaped := make([]byte, 0) + for i, c := range []byte(name) { + if i == 0 && c == '/' { + continue + } + // all printable chars except "-" which is 0x2d + if (0x20 <= c && c <= 0x7E) && c != 0x2d { + escaped = append(escaped, c) + } else { + escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) + } + } + return string(escaped) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + tw := tar.NewWriter(compressWriter) + + go func() { + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + if options.Includes == nil { + options.Includes = []string{"."} + } + + twBuf := pools.BufioWriter32KPool.Get(nil) + defer pools.BufioWriter32KPool.Put(twBuf) + + for _, include := range options.Includes { + filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { + if err != nil { + log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil { + return nil + } + + skip, err := fileutils.Matches(relFilePath, options.Excludes) + if err != nil { + log.Debugf("Error matching %s", relFilePath, err) + return err + } + + if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { + log.Debugf("Can't add file %s to tar: %s", srcPath, err) + } + return nil + }) + } + + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + log.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + log.Debugf("Can't close pipe writer: %s", err) + } + }() + + return pipeReader, nil +} + +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + // Iterate through the files in the archive. 
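+	// Each entry is checked against Excludes by name prefix, anchored
+	// under dest (rejecting breakouts), and materialized by createTarFile;
+	// directories are remembered so their mtimes can be restored after
+	// their contents have been written.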
+loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/" + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.Excludes { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + if !strings.HasSuffix(hdr.Name, "/") { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = os.MkdirAll(parentPath, 0777) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, "..") { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if fi.IsDir() && hdr.Name == "." { + continue + } + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(archive io.Reader, dest string, options *TarOptions) error { + if archive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.Excludes == nil { + options.Excludes = []string{} + } + decompressedArchive, err := DecompressStream(archive) + if err != nil { + return err + } + defer decompressedArchive.Close() + return Unpack(decompressedArchive, dest, options) +} + +func (archiver *Archiver) TarUntar(src, dst string) error { + log.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, nil) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. 
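+// The intermediate stream is uncompressed, so the copy costs no
+// compression CPU. A sketch, with illustrative paths:
+//
+//	if err := TarUntar("/var/lib/app", "/var/lib/app-copy"); err != nil {
+//		// handle the error
+//	}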
+func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + if err := archiver.Untar(archive, dst, nil); err != nil { + return err + } + return nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + // Create dst, copy src's content into it + log.Debugf("Creating dest directory: %s", dst) + if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { + return err + } + log.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + log.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + // Clean up the trailing / + if dst[len(dst)-1] == '/' { + dst = path.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err != nil { + err = er + } + }() + return archiver.Untar(r, filepath.Dir(dst), nil) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// CmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
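+// For illustration, xzDecompress above drives it like this:
+//
+//	out, err := CmdStream(exec.Command("xz", "-d", "-c", "-q"), archive)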
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + if input != nil { + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + // Write stdin if any + go func() { + io.Copy(stdin, input) + stdin.Close() + }() + } + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + pipeR, pipeW := io.Pipe() + errChan := make(chan []byte) + // Collect stderr, we will use it in case of an error + go func() { + errText, e := ioutil.ReadAll(stderr) + if e != nil { + errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") + } + errChan <- errText + }() + // Copy stdout to the returned pipe + go func() { + _, err := io.Copy(pipeW, stdout) + if err != nil { + pipeW.CloseWithError(err) + } + errText := <-errChan + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) + } else { + pipeW.Close() + } + }() + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if err = f.Sync(); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{f, size}, nil +} + +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + if err != nil { + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go new file mode 100644 index 00000000..7c9db444 --- /dev/null +++ b/pkg/archive/archive_test.go @@ -0,0 +1,448 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, err := CmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } 
else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + Excludes: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{Includes: []string{"1"}}, 1}, + {&TarOptions{Excludes: []string{"2"}}, 1}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. 
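+// (These carry tar typeflag 'g'; createTarFile treats them as a no-op
+// rather than failing on the unknown entry.)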
+// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.SetBytes(int64(n)) + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go new file mode 100644 index 00000000..5fbdcc90 --- /dev/null +++ b/pkg/archive/changes.go @@ -0,0 +1,411 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type ChangeType int + +const ( + ChangeModify = iota + ChangeAdd + ChangeDelete +) + +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var changes []Change + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + path = filepath.Join("/", path) + + // Skip root + if path == "/" { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, ".wh.") { + originalFile := file[len(".wh."):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +type FileInfo struct { + parent *FileInfo + name string + stat syscall.Stat_t + children map[string]*FileInfo + capability []byte + added bool +} + +func (root *FileInfo) LookUp(path string) *FileInfo { + parent := root + if path == "/" { + return root + } + + pathElements := strings.Split(path, "/") + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + return "/" + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := &oldChild.stat + newStat := &newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if oldStat.Mode != newStat.Mode || + oldStat.Uid != newStat.Uid || + oldStat.Gid != newStat.Gid || + oldStat.Rdev != newStat.Rdev || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || + !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. 
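+	// (Example: modifying only /dir/file also emits a ChangeModify for
+	// /dir, spliced in ahead of the /dir/file entry.)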
This is needed to properly save and restore filesystem permissions. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + root := &FileInfo{ + name: "/", + children: make(map[string]*FileInfo), + } + return root +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + relPath = filepath.Join("/", relPath) + + if relPath == "/" { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + if err := syscall.Lstat(path, &info.stat); err != nil { + return err + } + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + if oldDir != "" { + oldRoot, err1 = collectFileInfo(oldDir) + } + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, err + } + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var size int64 + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, _ := os.Lstat(file) + if fileInfo != nil && !fileInfo.IsDir() { + size += fileInfo.Size() + } + } + } + return size +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change) (Archive, error) { + reader, writer := io.Pipe() + tw := tar.NewWriter(writer) + + go func() { + twBuf := pools.BufioWriter32KPool.Get(nil) + defer pools.BufioWriter32KPool.Put(twBuf) + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := tw.WriteHeader(hdr); err != nil { + log.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { + log.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Debugf("Can't close layer: %s", err) + } + writer.Close() + }() + return reader, nil +} diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go new file mode 100644 index 00000000..34c0f0da --- /dev/null +++ b/pkg/archive/changes_test.go @@ -0,0 +1,301 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "sort" + "testing" + "time" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +// Helper to sort []Change by path +type byPath struct{ changes []Change } + +func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } +func (b byPath) Len() int { return len(b.changes) } +func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := os.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +// Create an 
example directory, copy it, and make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(byPath{changes}) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dir3", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) {
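+ // Both lists are sorted by path and walked in lockstep; running past the
+ // end of expectedChanges means the walk reported a change nobody asked for.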
t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go new file mode 100644 index 00000000..80bb1974 --- /dev/null +++ b/pkg/archive/diff.go @@ -0,0 +1,167 @@ +package archive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/pools" +) + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} + +func UnpackLayer(dest string, layer ArchiveReader) error { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + if !strings.HasSuffix(hdr.Name, "/") { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = os.MkdirAll(parentPath, 0600) + if err != nil { + return err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, ".wh..wh.") { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. 
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { + return err + } + } + continue + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, "..") { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, ".wh.") { + originalBase := base[len(".wh."):] + originalPath := filepath.Join(filepath.Dir(path), originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return err + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + return nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. 
+func ApplyLayer(dest string, layer ArchiveReader) error { + dest = filepath.Clean(dest) + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + layer, err := DecompressStream(layer) + if err != nil { + return err + } + return UnpackLayer(dest, layer) +} diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go new file mode 100644 index 00000000..758c4115 --- /dev/null +++ b/pkg/archive/diff_test.go @@ -0,0 +1,191 @@ +package archive + +import ( + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} diff --git a/pkg/archive/testdata/broken.tar b/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= 
zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/pkg/archive/time_linux.go b/pkg/archive/time_linux.go new file mode 100644 index 00000000..3448569b --- /dev/null +++ b/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/pkg/archive/time_unsupported.go b/pkg/archive/time_unsupported.go new file mode 100644 index 00000000..e85aac05 --- /dev/null +++ b/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go new file mode 100644 index 00000000..3624fe5a --- /dev/null +++ b/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + return ApplyLayer(dest, ArchiveReader(r)) + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. 
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. 
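+	// The walk below tolerates per-file errors: an unreadable file cannot be a
+	// breakout copy of victim/hello, so it is skipped rather than failing the test.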
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go new file mode 100644 index 00000000..b8b60197 --- /dev/null +++ b/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "bytes" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./emptyfile with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go new file mode 100644 index 00000000..1898302e --- /dev/null +++ b/pkg/broadcastwriter/broadcastwriter.go @@ -0,0 +1,101 @@ +package broadcastwriter + +import ( + "bytes" + "io" + "sync" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" +) + +// BroadcastWriter accumulates multiple io.WriteCloser by stream. +type BroadcastWriter struct { + sync.Mutex + buf *bytes.Buffer + jsLogBuf *bytes.Buffer + streams map[string](map[io.WriteCloser]struct{}) +} + +// AddWriter adds a new io.WriteCloser for stream. +// If stream is "", then all writes proceed as is. Otherwise every line from +// input will be packed into a serialized jsonlog.JSONLog. +func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) { + w.Lock() + if _, ok := w.streams[stream]; !ok { + w.streams[stream] = make(map[io.WriteCloser]struct{}) + } + w.streams[stream][writer] = struct{}{} + w.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call.
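+// For a writer registered under stream "stdout", for example, an input line "hi\n"
+// is delivered as one serialized jsonlog.JSONLog entry, roughly
+// {"log":"hi\n","stream":"stdout","time":...}, while writers registered under the
+// empty stream receive the raw bytes unchanged.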
+func (w *BroadcastWriter) Write(p []byte) (n int, err error) { + created := time.Now().UTC() + w.Lock() + if writers, ok := w.streams[""]; ok { + for sw := range writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + delete(writers, sw) + } + } + } + if w.jsLogBuf == nil { + w.jsLogBuf = new(bytes.Buffer) + w.jsLogBuf.Grow(1024) + } + w.buf.Write(p) + for { + line, err := w.buf.ReadString('\n') + if err != nil { + w.buf.Write([]byte(line)) + break + } + for stream, writers := range w.streams { + if stream == "" { + continue + } + jsonLog := jsonlog.JSONLog{Log: line, Stream: stream, Created: created} + err = jsonLog.MarshalJSONBuf(w.jsLogBuf) + if err != nil { + log.Errorf("Error making JSON log line: %s", err) + continue + } + w.jsLogBuf.WriteByte('\n') + b := w.jsLogBuf.Bytes() + for sw := range writers { + if _, err := sw.Write(b); err != nil { + delete(writers, sw) + } + } + } + w.jsLogBuf.Reset() + } + w.jsLogBuf.Reset() + w.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. +func (w *BroadcastWriter) Clean() error { + w.Lock() + for _, writers := range w.streams { + for w := range writers { + w.Close() + } + } + w.streams = make(map[string](map[io.WriteCloser]struct{})) + w.Unlock() + return nil +} + +func New() *BroadcastWriter { + return &BroadcastWriter{ + streams: make(map[string](map[io.WriteCloser]struct{})), + buf: bytes.NewBuffer(nil), + } +} diff --git a/pkg/broadcastwriter/broadcastwriter_test.go b/pkg/broadcastwriter/broadcastwriter_test.go new file mode 100644 index 00000000..62ca1265 --- /dev/null +++ b/pkg/broadcastwriter/broadcastwriter_test.go @@ -0,0 +1,144 @@ +package broadcastwriter + +import ( + "bytes" + "errors" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestBroadcastWriter(t *testing.T) { + writer := New() + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.AddWriter(bufferA, "") + bufferB := &dummyWriter{} + writer.AddWriter(bufferB, "") + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.AddWriter(bufferC, "") + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + 
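+	// Reaching this branch means the evicted bufferA received the "test" write;
+	// eviction is meant to be permanent, so this is a failure.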
t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. +func TestRaceBroadcastWriter(t *testing.T) { + writer := New() + c := make(chan bool) + go func() { + writer.AddWriter(devNullCloser(0), "") + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkBroadcastWriter(b *testing.B) { + writer := New() + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.AddWriter(devNullCloser(0), "stdout") + writer.AddWriter(devNullCloser(0), "stderr") + writer.AddWriter(devNullCloser(0), "") + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go new file mode 100644 index 00000000..0077f930 --- /dev/null +++ b/pkg/chrootarchive/archive.go @@ -0,0 +1,111 @@ +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +var chrootArchiver = &archive.Archiver{Untar} + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} + +func untar() { + runtime.LockOSThread() + flag.Parse() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + var options *archive.TarOptions + if err := json.NewDecoder(strings.NewReader(flag.Arg(1))).Decode(&options); err != nil { + fatal(err) + } + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + flush(os.Stdin) + os.Exit(0) +} + +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.Excludes == nil { + options.Excludes = []string{} + } + + var ( + buf bytes.Buffer + enc = json.NewEncoder(&buf) + ) + if err := enc.Encode(options); err != nil { + return fmt.Errorf("Untar json encode: %v", err) + } + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := os.MkdirAll(dest, 0777); err != nil { + return err + } + } + dest = filepath.Clean(dest) + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + + cmd := reexec.Command("docker-untar", dest, buf.String()) + cmd.Stdin = decompressedArchive + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Untar %s %s", err, out) + } + return nil +} + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, 
and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go new file mode 100644 index 00000000..0fe3d64f --- /dev/null +++ b/pkg/chrootarchive/archive_test.go @@ -0,0 +1,101 @@ +package chrootarchive + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{Excludes: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/chrootarchive/diff.go 
b/pkg/chrootarchive/diff.go new file mode 100644 index 00000000..d4e9529b --- /dev/null +++ b/pkg/chrootarchive/diff.go @@ -0,0 +1,60 @@ +package chrootarchive + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func applyLayer() { + runtime.LockOSThread() + flag.Parse() + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") + if err != nil { + fatal(err) + } + os.Setenv("TMPDIR", tmpDir) + err = archive.UnpackLayer("/", os.Stdin) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + os.RemoveAll(tmpDir) + flush(os.Stdin) + os.Exit(0) +} + +func ApplyLayer(dest string, layer archive.ArchiveReader) error { + dest = filepath.Clean(dest) + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return err + } + defer func() { + if c, ok := decompressed.(io.Closer); ok { + c.Close() + } + }() + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = decompressed + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("ApplyLayer %s %s", err, out) + } + return nil +} diff --git a/pkg/chrootarchive/init.go b/pkg/chrootarchive/init.go new file mode 100644 index 00000000..4116026e --- /dev/null +++ b/pkg/chrootarchive/init.go @@ -0,0 +1,26 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-untar", untar) + reexec.Register("docker-applyLayer", applyLayer) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) { + io.Copy(ioutil.Discard, r) +} diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..acc27f55 --- /dev/null +++ b/pkg/fileutils/fileutils.go @@ -0,0 +1,26 @@ +package fileutils + +import ( + "github.com/docker/docker/pkg/log" + "path/filepath" +) + +// Matches returns true if relFilePath matches any of the patterns +func Matches(relFilePath string, patterns []string) (bool, error) { + for _, exclude := range patterns { + matched, err := filepath.Match(exclude, relFilePath) + if err != nil { + log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) + return false, err + } + if matched { + if filepath.Clean(relFilePath) == "." 
{ + log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) + continue + } + log.Debugf("Skipping excluded path: %s", relFilePath) + return true, nil + } + } + return false, nil +} diff --git a/pkg/graphdb/MAINTAINERS b/pkg/graphdb/MAINTAINERS new file mode 100644 index 00000000..1e998f8a --- /dev/null +++ b/pkg/graphdb/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go new file mode 100644 index 00000000..b6a8027a --- /dev/null +++ b/pkg/graphdb/conn_sqlite3.go @@ -0,0 +1,34 @@ +// +build cgo + +package graphdb + +import ( + "database/sql" + "os" + + _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite +) + +func NewSqliteConn(root string) (*Database, error) { + initDatabase := false + + stat, err := os.Stat(root) + if err != nil { + if os.IsNotExist(err) { + initDatabase = true + } else { + return nil, err + } + } + + if stat != nil && stat.Size() == 0 { + initDatabase = true + } + + conn, err := sql.Open("sqlite3", root) + if err != nil { + return nil, err + } + + return NewDatabase(conn, initDatabase) +} diff --git a/pkg/graphdb/conn_unsupported.go b/pkg/graphdb/conn_unsupported.go new file mode 100644 index 00000000..38950516 --- /dev/null +++ b/pkg/graphdb/conn_unsupported.go @@ -0,0 +1,7 @@ +// +build !cgo + +package graphdb + +func NewSqliteConn(root string) (*Database, error) { + panic("Not implemented") +} diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go new file mode 100644 index 00000000..59873fef --- /dev/null +++ b/pkg/graphdb/graphdb.go @@ -0,0 +1,528 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "path" + "strings" + "sync" +) + +const ( + createEntityTable = ` + CREATE TABLE IF NOT EXISTS entity ( + id text NOT NULL PRIMARY KEY + );` + + createEdgeTable = ` + CREATE TABLE IF NOT EXISTS edge ( + "entity_id" text NOT NULL, + "parent_id" text NULL, + "name" text NOT NULL, + CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), + CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") + ); + ` + + createEdgeIndices = ` + CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); + ` +) + +// Entity with a unique id +type Entity struct { + id string +} + +// An Edge connects two entities together +type Edge struct { + EntityID string + Name string + ParentID string +} + +type Entities map[string]*Entity +type Edges []*Edge + +type WalkFunc func(fullPath string, entity *Entity) error + +// Graph database for storing entities and their relationships +type Database struct { + conn *sql.DB + mux sync.RWMutex +} + +func IsNonUniqueNameError(err error) bool { + str := err.Error() + // sqlite 3.7.17-1ubuntu1 returns: + // Set failure: Abort due to constraint violation: columns parent_id, name are not unique + if strings.HasSuffix(str, "name are not unique") { + return true + } + // sqlite-3.8.3-1.fc20 returns: + // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name + if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { + return true + } + // sqlite-3.6.20-1.el6 returns: + // Set failure: Abort due to constraint violation: constraint failed + if strings.HasSuffix(str, "constraint failed") { + return true + } + return false +} + +// Create a new graph database initialized with a root entity +func NewDatabase(conn *sql.DB, init bool) (*Database, error) { + if conn == nil { + return nil, fmt.Errorf("Database 
connection cannot be nil") + } + db := &Database{conn: conn} + + if init { + if _, err := conn.Exec(createEntityTable); err != nil { + return nil, err + } + if _, err := conn.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := conn.Exec(createEdgeIndices); err != nil { + return nil, err + } + + rollback := func() { + conn.Exec("ROLLBACK") + } + + // Create root entities + if _, err := conn.Exec("BEGIN"); err != nil { + return nil, err + } + if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("COMMIT"); err != nil { + return nil, err + } + } + return db, nil +} + +// Close the underlying connection to the database +func (db *Database) Close() error { + return db.conn.Close() +} + +// Set the entity id for a given path +func (db *Database) Set(fullPath, id string) (*Entity, error) { + db.mux.Lock() + defer db.mux.Unlock() + + rollback := func() { + db.conn.Exec("ROLLBACK") + } + if _, err := db.conn.Exec("BEGIN EXCLUSIVE"); err != nil { + return nil, err + } + var entityId string + if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityId); err != nil { + if err == sql.ErrNoRows { + if _, err := db.conn.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { + rollback() + return nil, err + } + } else { + rollback() + return nil, err + } + } + e := &Entity{id} + + parentPath, name := splitPath(fullPath) + if err := db.setEdge(parentPath, name, e); err != nil { + rollback() + return nil, err + } + + if _, err := db.conn.Exec("COMMIT"); err != nil { + return nil, err + } + return e, nil +} + +// Return true if a name already exists in the database +func (db *Database) Exists(name string) bool { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return false + } + return e != nil +} + +func (db *Database) setEdge(parentPath, name string, e *Entity) error { + parent, err := db.get(parentPath) + if err != nil { + return err + } + if parent.id == e.id { + return fmt.Errorf("Cannot set self as child") + } + + if _, err := db.conn.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { + return err + } + return nil +} + +// Return the root "/" entity for the database +func (db *Database) RootEntity() *Entity { + return &Entity{ + id: "0", + } +} + +// Return the entity for a given path +func (db *Database) Get(name string) *Entity { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil + } + return e +} + +func (db *Database) get(name string) (*Entity, error) { + e := db.RootEntity() + // We always know the root name so return it if + // it is requested + if name == "/" { + return e, nil + } + + parts := split(name) + for i := 1; i < len(parts); i++ { + p := parts[i] + if p == "" { + continue + } + + next := db.child(e, p) + if next == nil { + return nil, fmt.Errorf("Cannot find child for %s", name) + } + e = next + } + return e, nil + +} + +// List all entities by from the name +// The key will be the full path of the entity +func (db *Database) List(name string, depth int) Entities { + db.mux.RLock() + defer db.mux.RUnlock() + + out := Entities{} + e, err := db.get(name) + if err != nil { + return out + } + + children, err := db.children(e, name, depth, nil) + if err != nil { + return out 
+ } + + for _, c := range children { + out[c.FullPath] = c.Entity + } + return out +} + +// Walk through the child graph of an entity, calling walkFunc for each child entity. +// It is safe for walkFunc to call graph functions. +func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { + children, err := db.Children(name, depth) + if err != nil { + return err + } + + // Note: the database lock must not be held while calling walkFunc + for _, c := range children { + if err := walkFunc(c.FullPath, c.Entity); err != nil { + return err + } + } + return nil +} + +// Return the children of the specified entity +func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + + return db.children(e, name, depth, nil) +} + +// Return the parents of a specified entity +func (db *Database) Parents(name string) ([]string, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + return db.parents(e) +} + +// Return the reference count for a specified id +func (db *Database) Refs(id string) int { + db.mux.RLock() + defer db.mux.RUnlock() + + var count int + if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { + return 0 + } + return count +} + +// Return all path references for the given id +func (db *Database) RefPaths(id string) Edges { + db.mux.RLock() + defer db.mux.RUnlock() + + refs := Edges{} + + rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) + if err != nil { + return refs + } + defer rows.Close() + + for rows.Next() { + var name string + var parentId string + if err := rows.Scan(&name, &parentId); err != nil { + return refs + } + refs = append(refs, &Edge{ + EntityID: id, + Name: name, + ParentID: parentId, + }) + } + return refs +} + +// Delete the reference to an entity at a given path +func (db *Database) Delete(name string) error { + db.mux.Lock() + defer db.mux.Unlock() + + if name == "/" { + return fmt.Errorf("Cannot delete root entity") + } + + parentPath, n := splitPath(name) + parent, err := db.get(parentPath) + if err != nil { + return err + } + + if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ?
AND name = ?;", parent.id, n); err != nil { + return err + } + return nil +} + +// Remove the entity with the specified id +// Walk the graph to make sure all references to the entity +// are removed and return the number of references removed +func (db *Database) Purge(id string) (int, error) { + db.mux.Lock() + defer db.mux.Unlock() + + rollback := func() { + db.conn.Exec("ROLLBACK") + } + + if _, err := db.conn.Exec("BEGIN"); err != nil { + return -1, err + } + + // Delete all edges + rows, err := db.conn.Exec("DELETE FROM edge WHERE entity_id = ?;", id) + if err != nil { + rollback() + return -1, err + } + + changes, err := rows.RowsAffected() + if err != nil { + return -1, err + } + + // Delete entity + if _, err := db.conn.Exec("DELETE FROM entity where id = ?;", id); err != nil { + rollback() + return -1, err + } + + if _, err := db.conn.Exec("COMMIT"); err != nil { + return -1, err + } + return int(changes), nil +} + +// Rename an edge for a given path +func (db *Database) Rename(currentName, newName string) error { + db.mux.Lock() + defer db.mux.Unlock() + + parentPath, name := splitPath(currentName) + newParentPath, newEdgeName := splitPath(newName) + + if parentPath != newParentPath { + return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) + } + + parent, err := db.get(parentPath) + if err != nil { + return err + } + + rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) + if err != nil { + return err + } + i, err := rows.RowsAffected() + if err != nil { + return err + } + if i == 0 { + return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) + } + return nil +} + +type WalkMeta struct { + Parent *Entity + Entity *Entity + FullPath string + Edge *Edge +} + +func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { + if e == nil { + return entities, nil + } + + rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var entityId, entityName string + if err := rows.Scan(&entityId, &entityName); err != nil { + return nil, err + } + child := &Entity{entityId} + edge := &Edge{ + ParentID: e.id, + Name: entityName, + EntityID: child.id, + } + + meta := WalkMeta{ + Parent: e, + Entity: child, + FullPath: path.Join(name, edge.Name), + Edge: edge, + } + + entities = append(entities, meta) + + if depth != 0 { + nDepth := depth + if depth != -1 { + nDepth -= 1 + } + entities, err = db.children(child, meta.FullPath, nDepth, entities) + if err != nil { + return nil, err + } + } + } + + return entities, nil +} + +func (db *Database) parents(e *Entity) (parents []string, err error) { + if e == nil { + return parents, nil + } + + rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var parentId string + if err := rows.Scan(&parentId); err != nil { + return nil, err + } + parents = append(parents, parentId) + } + + return parents, nil +} + +// Return the entity based on the parent path and name +func (db *Database) child(parent *Entity, name string) *Entity { + var id string + if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? 
AND name = ?;", parent.id, name).Scan(&id); err != nil { + return nil + } + return &Entity{id} +} + +// Return the id used to reference this entity +func (e *Entity) ID() string { + return e.id +} + +// Return the paths sorted by depth +func (e Entities) Paths() []string { + out := make([]string, len(e)) + var i int + for k := range e { + out[i] = k + i++ + } + sortByDepth(out) + + return out +} diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go new file mode 100644 index 00000000..7568e66d --- /dev/null +++ b/pkg/graphdb/graphdb_test.go @@ -0,0 +1,619 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "os" + "path" + "strconv" + "testing" + + _ "code.google.com/p/gosqlite/sqlite3" +) + +func newTestDb(t *testing.T) (*Database, string) { + p := path.Join(os.TempDir(), "sqlite.db") + conn, err := sql.Open("sqlite3", p) + db, err := NewDatabase(conn, true) + if err != nil { + t.Fatal(err) + } + return db, p +} + +func destroyTestDb(dbPath string) { + os.Remove(dbPath) +} + +func TestNewDatabase(t *testing.T) { + db, dbpath := newTestDb(t) + if db == nil { + t.Fatal("Database should not be nil") + } + db.Close() + defer destroyTestDb(dbpath) +} + +func TestCreateRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + root := db.RootEntity() + if root == nil { + t.Fatal("Root entity should not be nil") + } +} + +func TestGetRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + e := db.Get("/") + if e == nil { + t.Fatal("Entity should not be nil") + } + if e.ID() != "0" { + t.Fatalf("Enity id should be 0, got %s", e.ID()) + } +} + +func TestSetEntityWithDifferentName(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/test", "1") + if _, err := db.Set("/other", "1"); err != nil { + t.Fatal(err) + } +} + +func TestSetDuplicateEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/foo", "42"); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/foo", "43"); err == nil { + t.Fatalf("Creating an entry with a duplciate path did not cause an error") + } +} + +func TestCreateChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/db", "1") + if err != nil { + t.Fatal(err) + } + if child == nil { + t.Fatal("Child should not be nil") + } + if child.ID() != "1" { + t.Fail() + } +} + +func TestParents(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + + for i := 6; i < 11; i++ { + a := strconv.Itoa(i) + p := strconv.Itoa(i - 5) + + key := fmt.Sprintf("/%s/%s", p, a) + + if _, err := db.Set(key, a); err != nil { + t.Fatal(err) + } + + parents, err := db.Parents(key) + if err != nil { + t.Fatal(err) + } + + if len(parents) != 1 { + t.Fatalf("Expected 2 entries for %s got %d", key, len(parents)) + } + + if parents[0] != p { + t.Fatalf("ID %s received, %s expected", parents[0], p) + } + } +} + +func TestChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + str := "/" + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + + str = "/" + for i := 10; i < 30; i++ { // 20 entities + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + entries, err := 
db.Children("/", 5) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 11 { + t.Fatalf("Expect 11 entries for / got %d", len(entries)) + } + + entries, err = db.Children("/", 20) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 25 { + t.Fatalf("Expect 25 entries for / got %d", len(entries)) + } +} + +func TestListAllRootChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + entries := db.List("/", -1) + if len(entries) != 5 { + t.Fatalf("Expect 5 entries for / got %d", len(entries)) + } +} + +func TestListAllSubChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + entries := db.List("/webapp", 1) + if len(entries) != 3 { + t.Fatalf("Expect 3 entries for / got %d", len(entries)) + } + + entries = db.List("/webapp", 0) + if len(entries) != 2 { + t.Fatalf("Expect 2 entries for / got %d", len(entries)) + } +} + +func TestAddSelfAsChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/test", "1") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/test/other", child.ID()); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestAddChildToNonExistantRoot(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestWalkAll(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/db/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Walk("/", func(p string, e *Entity) error { + t.Logf("Path: %s Entity: %s", p, e.ID()) + return nil + }, -1); err != nil { + t.Fatal(err) + } +} + +func TestGetEntityByPath(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, 
err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + entity := db.Get("/webapp/db/logs") + if entity == nil { + t.Fatal("Entity should not be nil") + } + if entity.ID() != "4" { + t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) + } +} + +func TestEnitiesPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + out := db.List("/", -1) + for _, p := range out.Paths() { + t.Log(p) + } +} + +func TestDeleteRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if err := db.Delete("/"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestDeleteEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Delete("/webapp/sentry"); err != nil { + t.Fatal(err) + } + entity := db.Get("/webapp/sentry") + if entity != nil { + t.Fatal("Entity /webapp/sentry should be nil") + } +} + +func TestCountRefs(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + if db.Refs("2") != 2 { + t.Fatal("Expect reference count to be 2") + } +} + +func TestPurgeId(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatal("Expected 2 references to be removed") + } +} + +func TestRename(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + 
db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + if db.Get("/webapp/db") == nil { + t.Fatal("Cannot find entity at path /webapp/db") + } + + if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { + t.Fatal(err) + } + if db.Get("/webapp/db") != nil { + t.Fatal("Entity should not exist at /webapp/db") + } + if db.Get("/webapp/newdb") == nil { + t.Fatal("Cannot find entity at path /webapp/newdb") + } + +} + +func TestCreateMultipleNames(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/db", "1") + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + db.Walk("/", func(p string, e *Entity) error { + t.Logf("%s\n", p) + return nil + }, -1) +} + +func TestRefPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + refs := db.RefPaths("2") + if len(refs) != 2 { + t.Fatalf("Expected reference count to be 2, got %d", len(refs)) + } +} + +func TestExistsTrue(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/testing", "1") + + if !db.Exists("/testing") { + t.Fatalf("/tesing should exist") + } +} + +func TestExistsFalse(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/toerhe", "1") + + if db.Exists("/testing") { + t.Fatalf("/tesing should not exist") + } + +} + +func TestGetNameWithTrailingSlash(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/todo", "1") + + e := db.Get("/todo/") + if e == nil { + t.Fatalf("Entity should not be nil") + } +} + +func TestConcurrentWrites(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + errs := make(chan error, 2) + + save := func(name string, id string) { + if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { + errs <- err + } + errs <- nil + } + purge := func(id string) { + if _, err := db.Purge(id); err != nil { + errs <- err + } + errs <- nil + } + + save("/1", "1") + + go purge("1") + go save("/2", "2") + + any := false + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + any = true + t.Log(err) + } + } + if any { + t.Fail() + } +} diff --git a/pkg/graphdb/sort.go b/pkg/graphdb/sort.go new file mode 100644 index 00000000..c07df077 --- /dev/null +++ b/pkg/graphdb/sort.go @@ -0,0 +1,27 @@ +package graphdb + +import "sort" + +type pathSorter struct { + paths []string + by func(i, j string) bool +} + +func sortByDepth(paths []string) { + s := &pathSorter{paths, func(i, j string) bool { + return PathDepth(i) > PathDepth(j) + }} + sort.Sort(s) +} + +func (s *pathSorter) Len() int { + return len(s.paths) +} + +func (s *pathSorter) Swap(i, j int) { + s.paths[i], s.paths[j] = s.paths[j], s.paths[i] +} + +func (s *pathSorter) Less(i, j int) bool { + return s.by(s.paths[i], s.paths[j]) +} diff --git a/pkg/graphdb/sort_test.go b/pkg/graphdb/sort_test.go new file mode 100644 index 00000000..ddf2266f --- /dev/null +++ b/pkg/graphdb/sort_test.go @@ -0,0 +1,29 @@ +package graphdb + +import ( + "testing" +) + +func TestSort(t *testing.T) { + paths := []string{ + "/", + "/myreallylongname", + "/app/db", + } + + sortByDepth(paths) + + if len(paths) != 3 { + t.Fatalf("Expected 3 parts got %d", len(paths)) + } + + if paths[0] != "/app/db" { + t.Fatalf("Expected /app/db got %s", paths[0]) + } + if paths[1] != "/myreallylongname" { + t.Fatalf("Expected 
/myreallylongname got %s", paths[1]) + } + if paths[2] != "/" { + t.Fatalf("Expected / got %s", paths[2]) + } +} diff --git a/pkg/graphdb/utils.go b/pkg/graphdb/utils.go new file mode 100644 index 00000000..bdbcd798 --- /dev/null +++ b/pkg/graphdb/utils.go @@ -0,0 +1,32 @@ +package graphdb + +import ( + "path" + "strings" +) + +// Split p on / +func split(p string) []string { + return strings.Split(p, "/") +} + +// Returns the depth or number of / in a given path +func PathDepth(p string) int { + parts := split(p) + if len(parts) == 2 && parts[1] == "" { + return 1 + } + return len(parts) +} + +func splitPath(p string) (parent, name string) { + if p[0] != '/' { + p = "/" + p + } + parent, name = path.Split(p) + l := len(parent) + if parent[l-1] == '/' { + parent = parent[:l-1] + } + return +} diff --git a/pkg/httputils/MAINTAINERS b/pkg/httputils/MAINTAINERS new file mode 100644 index 00000000..6dde4769 --- /dev/null +++ b/pkg/httputils/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go new file mode 100644 index 00000000..3cd1f491 --- /dev/null +++ b/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,93 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/docker/pkg/log" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content 
length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + log.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go new file mode 100644 index 00000000..22f46fbd --- /dev/null +++ b/pkg/ioutils/readers.go @@ -0,0 +1,114 @@ +package ioutils + +import ( + "bytes" + "io" + "sync" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +type bufReader struct { + sync.Mutex + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond + drainBuf []byte +} + +func NewBufReader(r io.Reader) *bufReader { + reader := &bufReader{ + buf: &bytes.Buffer{}, + drainBuf: make([]byte, 1024), + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { + reader := &bufReader{ + buf: buffer, + drainBuf: drainBuffer, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func (r *bufReader) drain() { + for { + n, err := r.reader.Read(r.drainBuf) + r.Lock() + if err != nil { + r.err = err + } else { + r.buf.Write(r.drainBuf[0:n]) + } + r.wait.Signal() + r.Unlock() + if err != nil { + break + } + } +} + +func (r *bufReader) Read(p []byte) (n int, err error) { + r.Lock() + defer r.Unlock() + for { + n, err = r.buf.Read(p) + if n > 0 { + return n, err + } + if r.err != nil { + return 0, r.err + } + r.wait.Wait() + } +} + +func (r *bufReader) Close() error { + closer, ok := r.reader.(io.ReadCloser) + if !ok { + return nil + } + return closer.Close() +} diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go new file mode 100644 index 00000000..a7a2dad1 --- /dev/null +++ b/pkg/ioutils/readers_test.go @@ -0,0 +1,34 @@ +package ioutils + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} diff --git a/pkg/ioutils/writers.go 
b/pkg/ioutils/writers.go new file mode 100644 index 00000000..c0b3608f --- /dev/null +++ b/pkg/ioutils/writers.go @@ -0,0 +1,39 @@ +package ioutils + +import "io" + +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +type NopFlusher struct{} + +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} diff --git a/pkg/iptables/MAINTAINERS b/pkg/iptables/MAINTAINERS new file mode 100644 index 00000000..1e998f8a --- /dev/null +++ b/pkg/iptables/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go new file mode 100644 index 00000000..88d8b5f3 --- /dev/null +++ b/pkg/iptables/iptables.go @@ -0,0 +1,193 @@ +package iptables + +import ( + "errors" + "fmt" + "net" + "os" + "os/exec" + "regexp" + "strconv" + "strings" +) + +type Action string + +const ( + Add Action = "-A" + Delete Action = "-D" +) + +var ( + ErrIptablesNotFound = errors.New("Iptables not found") + nat = []string{"-t", "nat"} + supportsXlock = false +) + +type Chain struct { + Name string + Bridge string +} + +func init() { + supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil +} + +func NewChain(name, bridge string) (*Chain, error) { + if output, err := Raw("-t", "nat", "-N", name); err != nil { + return nil, err + } else if len(output) != 0 { + return nil, fmt.Errorf("Error creating new iptables chain: %s", output) + } + chain := &Chain{ + Name: name, + Bridge: bridge, + } + + if err := chain.Prerouting(Add, "-m", "addrtype", "--dst-type", "LOCAL"); err != nil { + return nil, fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err) + } + if err := chain.Output(Add, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8"); err != nil { + return nil, fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err) + } + return chain, nil +} + +func RemoveExistingChain(name string) error { + chain := &Chain{ + Name: name, + } + return chain.Remove() +} + +func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr string, dest_port int) error { + daddr := ip.String() + if ip.IsUnspecified() { + // iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we + // want "0.0.0.0/0". "0/0" is correctly interpreted as "any + // value" by both iptables and ip6tables. 
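+		//
+		// For illustration only (the values here are hypothetical): with
+		// action Add, proto "tcp", port 8080, dest_addr "172.17.0.2" and
+		// dest_port 80, the nat rule assembled below is roughly
+		//   iptables -t nat -A <chain> -p tcp -d 0/0 --dport 8080 \
+		//     ! -i <bridge> -j DNAT --to-destination 172.17.0.2:80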
+ daddr = "0/0" + } + if output, err := Raw("-t", "nat", fmt.Sprint(action), c.Name, + "-p", proto, + "-d", daddr, + "--dport", strconv.Itoa(port), + "!", "-i", c.Bridge, + "-j", "DNAT", + "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables forward: %s", output) + } + + fAction := action + if fAction == Add { + fAction = "-I" + } + if output, err := Raw(string(fAction), "FORWARD", + "!", "-i", c.Bridge, + "-o", c.Bridge, + "-p", proto, + "-d", dest_addr, + "--dport", strconv.Itoa(dest_port), + "-j", "ACCEPT"); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables forward: %s", output) + } + + return nil +} + +func (c *Chain) Prerouting(action Action, args ...string) error { + a := append(nat, fmt.Sprint(action), "PREROUTING") + if len(args) > 0 { + a = append(a, args...) + } + if output, err := Raw(append(a, "-j", c.Name)...); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables prerouting: %s", output) + } + return nil +} + +func (c *Chain) Output(action Action, args ...string) error { + a := append(nat, fmt.Sprint(action), "OUTPUT") + if len(args) > 0 { + a = append(a, args...) + } + if output, err := Raw(append(a, "-j", c.Name)...); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables output: %s", output) + } + return nil +} + +func (c *Chain) Remove() error { + // Ignore errors - This could mean the chains were never set up + c.Prerouting(Delete, "-m", "addrtype", "--dst-type", "LOCAL") + c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8") + c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL") // Created in versions <= 0.1.6 + + c.Prerouting(Delete) + c.Output(Delete) + + Raw("-t", "nat", "-F", c.Name) + Raw("-t", "nat", "-X", c.Name) + + return nil +} + +// Check if an existing rule exists +func Exists(args ...string) bool { + // iptables -C, --check option was added in v.1.4.11 + // http://ftp.netfilter.org/pub/iptables/changes-iptables-1.4.11.txt + + // try -C + // if exit status is 0 then return true, the rule exists + if _, err := Raw(append([]string{"-C"}, args...)...); err == nil { + return true + } + + // parse iptables-save for the rule + rule := strings.Replace(strings.Join(args, " "), "-t nat ", "", -1) + existingRules, _ := exec.Command("iptables-save").Output() + + // regex to replace ips in rule + // because MASQUERADE rule will not be exactly what was passed + re := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}`) + + return strings.Contains( + re.ReplaceAllString(string(existingRules), "?"), + re.ReplaceAllString(rule, "?"), + ) +} + +func Raw(args ...string) ([]byte, error) { + path, err := exec.LookPath("iptables") + if err != nil { + return nil, ErrIptablesNotFound + } + + if supportsXlock { + args = append([]string{"--wait"}, args...) 
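+		// --wait, when supported (probed in init above), makes iptables
+		// block on the xtables lock instead of failing if another
+		// iptables process is running.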
+	}
+
+	if os.Getenv("DEBUG") != "" {
+		fmt.Fprintf(os.Stderr, "[debug] %s, %v\n", path, args)
+	}
+
+	output, err := exec.Command(path, args...).CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err)
+	}
+
+	// ignore iptables' message about the xtables lock
+	if strings.Contains(string(output), "waiting for it to exit") {
+		output = []byte("")
+	}
+
+	return output, err
+}
diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go
new file mode 100644
index 00000000..b0c61a80
--- /dev/null
+++ b/pkg/jsonlog/jsonlog.go
@@ -0,0 +1,53 @@
+package jsonlog
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"time"
+)
+
+type JSONLog struct {
+	Log     string    `json:"log,omitempty"`
+	Stream  string    `json:"stream,omitempty"`
+	Created time.Time `json:"time"`
+}
+
+func (jl *JSONLog) Format(format string) (string, error) {
+	if format == "" {
+		return jl.Log, nil
+	}
+	if format == "json" {
+		m, err := json.Marshal(jl)
+		return string(m), err
+	}
+	return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil
+}
+
+func (jl *JSONLog) Reset() {
+	jl.Log = ""
+	jl.Stream = ""
+	jl.Created = time.Time{}
+}
+
+func WriteLog(src io.Reader, dst io.Writer, format string) error {
+	dec := json.NewDecoder(src)
+	l := &JSONLog{}
+	for {
+		if err := dec.Decode(l); err == io.EOF {
+			return nil
+		} else if err != nil {
+			log.Printf("Error streaming logs: %s", err)
+			return err
+		}
+		line, err := l.Format(format)
+		if err != nil {
+			return err
+		}
+		if _, err := io.WriteString(dst, line); err != nil {
+			return err
+		}
+		l.Reset()
+	}
+}
diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go
new file mode 100644
index 00000000..6c7f73a9
--- /dev/null
+++ b/pkg/jsonlog/jsonlog_marshalling.go
@@ -0,0 +1,176 @@
+// This code was initially generated by ffjson.
+// It was produced via the following steps:
+// $ go get -u github.com/pquerna/ffjson
+// $ make shell BINDDIR=.
+// $ ffjson pkg/jsonlog/jsonlog.go
+// $ mv pkg/jsonlog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go
+//
+// It has been modified to improve the performance of time marshalling to JSON
+// and to clean it up.
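+//
+// In short: MarshalJSONBuf below hand-writes a JSONLog into a caller-supplied
+// bytes.Buffer, formatting the timestamp with timeutils.FastMarshalJSON
+// rather than time.Time's MarshalJSON, per the modifications noted above.
+//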
+// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +//+ +//+ "github.com/docker/docker/pkg/timeutils" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = timeutils.FastMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" + + "github.com/docker/docker/pkg/timeutils" +) + +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first bool = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"log":`) + ffjson_WriteJsonString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjson_WriteJsonString(buf, mj.Stream) + } + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = timeutils.FastMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjson_WriteJsonString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git a/pkg/jsonlog/jsonlog_test.go b/pkg/jsonlog/jsonlog_test.go new file mode 100644 index 00000000..5ee5eda3 --- /dev/null +++ b/pkg/jsonlog/jsonlog_test.go @@ -0,0 +1,61 @@ +package jsonlog + +import ( + "bytes" + 
"encoding/json" + "io/ioutil" + "regexp" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/timeutils" +) + +func TestWriteLog(t *testing.T) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + testLine := "Line that thinks that it is log line from docker\n" + for i := 0; i < 30; i++ { + e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()}) + } + w := bytes.NewBuffer(nil) + format := timeutils.RFC3339NanoFixed + if err := WriteLog(&buf, w, format); err != nil { + t.Fatal(err) + } + res := w.String() + t.Logf("Result of WriteLog: %q", res) + lines := strings.Split(strings.TrimSpace(res), "\n") + if len(lines) != 30 { + t.Fatalf("Must be 30 lines but got %d", len(lines)) + } + logRe := regexp.MustCompile(`\[.*\] Line that thinks that it is log line from docker`) + for _, l := range lines { + if !logRe.MatchString(l) { + t.Fatalf("Log line not in expected format: %q", l) + } + } +} + +func BenchmarkWriteLog(b *testing.B) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + testLine := "Line that thinks that it is log line from docker\n" + for i := 0; i < 30; i++ { + e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()}) + } + r := bytes.NewReader(buf.Bytes()) + w := ioutil.Discard + format := timeutils.RFC3339NanoFixed + b.SetBytes(int64(r.Len())) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := WriteLog(r, w, format); err != nil { + b.Fatal(err) + } + b.StopTimer() + r.Seek(0, 0) + b.StartTimer() + } +} diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go new file mode 100644 index 00000000..17572c8a --- /dev/null +++ b/pkg/listenbuffer/buffer.go @@ -0,0 +1,46 @@ +/* + Package to allow go applications to immediately start + listening on a socket, unix, tcp, udp but hold connections + until the application has booted and is ready to accept them +*/ +package listenbuffer + +import "net" + +// NewListenBuffer returns a listener listening on addr with the protocol. +func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) { + wrapped, err := net.Listen(proto, addr) + if err != nil { + return nil, err + } + + return &defaultListener{ + wrapped: wrapped, + activate: activate, + }, nil +} + +type defaultListener struct { + wrapped net.Listener // the real listener to wrap + ready bool // is the listner ready to start accpeting connections + activate chan struct{} +} + +func (l *defaultListener) Close() error { + return l.wrapped.Close() +} + +func (l *defaultListener) Addr() net.Addr { + return l.wrapped.Addr() +} + +func (l *defaultListener) Accept() (net.Conn, error) { + // if the listen has been told it is ready then we can go ahead and + // start returning connections + if l.ready { + return l.wrapped.Accept() + } + <-l.activate + l.ready = true + return l.Accept() +} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 00000000..53be6cf1 --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,83 @@ +package log + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" +) + +type priority int + +const ( + errorFormat = "[%s] %s:%d %s\n" + logFormat = "[%s] %s\n" + + fatal priority = iota + error + info + debug +) + +// A common interface to access the Fatal method of +// both testing.B and testing.T. 
+type Fataler interface {
+	Fatal(args ...interface{})
+}
+
+func (p priority) String() string {
+	switch p {
+	case fatal:
+		return "fatal"
+	case error:
+		return "error"
+	case info:
+		return "info"
+	case debug:
+		return "debug"
+	}
+
+	return ""
+}
+
+// Debugf displays the message only if the debug flag is set; it does nothing otherwise.
+// If Docker is in daemon mode, the debug info is also sent on the socket.
+func Debugf(format string, a ...interface{}) {
+	if os.Getenv("DEBUG") != "" {
+		logf(os.Stderr, debug, format, a...)
+	}
+}
+
+func Infof(format string, a ...interface{}) {
+	logf(os.Stdout, info, format, a...)
+}
+
+func Errorf(format string, a ...interface{}) {
+	logf(os.Stderr, error, format, a...)
+}
+
+func Fatalf(format string, a ...interface{}) {
+	logf(os.Stderr, fatal, format, a...)
+	os.Exit(1)
+}
+
+func logf(stream io.Writer, level priority, format string, a ...interface{}) {
+	var prefix string
+
+	if level <= error || level == debug {
+		// Retrieve the stack infos
+		_, file, line, ok := runtime.Caller(2)
+		if !ok {
+			file = ""
+			line = -1
+		} else {
+			file = file[strings.LastIndex(file, "/")+1:]
+		}
+		prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format)
+	} else {
+		prefix = fmt.Sprintf(logFormat, level.String(), format)
+	}
+
+	fmt.Fprintf(stream, prefix, a...)
+}
diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go
new file mode 100644
index 00000000..83ba5fd2
--- /dev/null
+++ b/pkg/log/log_test.go
@@ -0,0 +1,37 @@
+package log
+
+import (
+	"bytes"
+	"regexp"
+
+	"testing"
+)
+
+func TestLogFatalf(t *testing.T) {
+	var output *bytes.Buffer
+
+	tests := []struct {
+		Level           priority
+		Format          string
+		Values          []interface{}
+		ExpectedPattern string
+	}{
+		{fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"},
+		{error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"},
+		{info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"},
+		{debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"},
+	}
+
+	for i, test := range tests {
+		output = &bytes.Buffer{}
+		logf(output, test.Level, test.Format, test.Values...)
+
+		expected := regexp.MustCompile(test.ExpectedPattern)
+		if !expected.MatchString(output.String()) {
+			t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s",
+				i,
+				expected.String(),
+				output.String())
+		}
+	}
+}
diff --git a/pkg/mflag/LICENSE b/pkg/mflag/LICENSE
new file mode 100644
index 00000000..ebcfbcc7
--- /dev/null
+++ b/pkg/mflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/mflag/MAINTAINERS b/pkg/mflag/MAINTAINERS
new file mode 100644
index 00000000..e0f18f14
--- /dev/null
+++ b/pkg/mflag/MAINTAINERS
@@ -0,0 +1 @@
+Victor Vieux (@vieux)
diff --git a/pkg/mflag/README.md b/pkg/mflag/README.md
new file mode 100644
index 00000000..da00efa3
--- /dev/null
+++ b/pkg/mflag/README.md
@@ -0,0 +1,40 @@
+Package mflag (aka multiple-flag) implements command-line flag parsing.
+It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/)
+
+It adds:
+
+* both short and long flag versions
+`./example -s red` `./example --string blue`
+
+* multiple names for the same option
+```
+$>./example -h
+Usage of example:
+  -s, --string="": a simple string
+```
+
+___
+It is very flexible on purpose, so you can do things like:
+```
+$>./example -h
+Usage of example:
+  -s, -string, --string="": a simple string
+```
+
+Or:
+```
+$>./example -h
+Usage of example:
+  -oldflag, --newflag="": a simple string
+```
+
+You can also hide some flags from the usage, so if we want only `--newflag`:
+```
+$>./example -h
+Usage of example:
+  --newflag="": a simple string
+$>./example -oldflag str
+str
+```
+
+See [example.go](example/example.go) for more details.
diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go
new file mode 100644
index 00000000..2e766dd1
--- /dev/null
+++ b/pkg/mflag/example/example.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+var (
+	i        int
+	str      string
+	b, b2, h bool
+)
+
+func init() {
+	flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
+	flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
+	flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
+	flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
+	flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
+	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") // -s, -hidden and --string will work, but -hidden won't be in the usage
+	flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
+	flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3")
+	flag.Parse()
+}
+func main() {
+	if h {
+		flag.PrintDefaults()
+	} else {
+		fmt.Printf("s/#hidden/-string: %s\n", str)
+		fmt.Printf("b: %t\n", b)
+		fmt.Printf("-bool: %t\n", b2)
+		fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
+		fmt.Printf("ARGS: %v\n", flag.Args())
+	}
+}
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
new file mode 100644
index 00000000..b40f9117
--- /dev/null
+++ b/pkg/mflag/flag.go
@@ -0,0 +1,1003 @@
+// Copyright 2014 The Docker & Go Authors.
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+	Package flag implements command-line flag parsing.
+
+	Usage:
+
+	Define flags using flag.String(), Bool(), Int(), etc.
+
+	This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
+		import flag "github.com/docker/docker/pkg/mflag"
+		var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
+	If you like, you can bind the flag to a variable using the Var() functions.
+		var flagvar int
+		func init() {
+			// -flaghidden will work, but will be hidden from the usage
+			flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
+		}
+	Or you can create custom flags that satisfy the Value interface (with
+	pointer receivers) and couple them to flag parsing by
+		flag.Var(&flagVal, []string{"name"}, "help message for flagname")
+	For such flags, the default value is just the initial value of the variable.
+
+	You can also add "deprecated" flags. They are still usable, but are not shown
+	in the usage and display a warning when you try to use them:
+		var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname")
+	this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and
+		var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
+	will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
+
+	You can also group one-letter flags: if you declare
+		var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
+		var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
+	you will be able to use -vs or -sv.
+
+	After all flags are defined, call
+		flag.Parse()
+	to parse the command line into the defined flags.
+
+	Flags may then be used directly. If you're using the flags themselves,
+	they are all pointers; if you bind to variables, they're values.
+		fmt.Println("ip has value ", *ip)
+		fmt.Println("flagvar has value ", flagvar)
+
+	After parsing, the arguments after the flag are available as the
+	slice flag.Args() or individually as flag.Arg(i).
+	The arguments are indexed from 0 through flag.NArg()-1.
+
+	Command line flag syntax:
+		-flag
+		-flag=x
+		-flag="x"
+		-flag='x'
+		-flag x  // non-boolean flags only
+	One or two minus signs may be used; they are equivalent.
+	The last form is not permitted for boolean flags because the
+	meaning of the command
+		cmd -x *
+	will change if there is a file called 0, false, etc. You must
+	use the -flag=false form to turn off a boolean flag.
+
+	Flag parsing stops just before the first non-flag argument
+	("-" is a non-flag argument) or after the terminator "--".
+
+	Integer flags accept 1234, 0664, 0x1234 and may be negative.
+	Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
+	Duration flags accept any input valid for time.ParseDuration.
+
+	The default set of command-line flags is controlled by
+	top-level functions. The FlagSet type allows one to define
+	independent sets of flags, such as to implement subcommands
+	in a command-line interface. The methods of FlagSet are
+	analogous to the top-level functions for the command-line
+	flag set.
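+
+	A minimal end-to-end sketch (the names below are illustrative only):
+		import flag "github.com/docker/docker/pkg/mflag"
+		var addr = flag.String([]string{"a", "-addr"}, "127.0.0.1", "address to bind")
+		func main() {
+			flag.Parse()
+			fmt.Println(*addr) // set by either -a or --addr
+		}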
+*/ +package mflag + +import ( + "errors" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("flag: help requested") + +// ErrRetry is the error returned if you need to try letter by letter +var ErrRetry = errors.New("flag: retry") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d 
*durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. +type Flag struct { + Names []string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") + lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) + if lpi != lpj { + return lpi < lpj + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + var list flagSlice + + // The sorted list is based on the first name, when flag map might use the other names. + nameMap := make(map[string]string) + + for n, f := range flags { + fName := strings.TrimPrefix(f.Names[0], "#") + nameMap[fName] = n + if len(f.Names) == 1 { + list = append(list, fName) + continue + } + + found := false + for _, name := range list { + if name == fName { + found = true + break + } + } + if !found { + list = append(list, fName) + } + } + sort.Sort(list) + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[nameMap[name]] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. 
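+//
+// For example, to print every defined flag with its default value
+// (a sketch; fs is some *FlagSet):
+//
+//	fs.VisitAll(func(fl *Flag) {
+//		fmt.Println(fl.Names, "default:", fl.DefValue)
+//	})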
+func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.formal[name] +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(f.out(), 20, 1, 3, ' ', 0) + f.VisitAll(func(flag *Flag) { + format := " -%s=%s" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = " -%s=%q" + } + names := []string{} + for _, name := range flag.Names { + if name[0] != '#' { + names = append(names, name) + } + } + if len(names) > 0 { + fmt.Fprintf(writer, format, strings.Join(names, ", -"), flag.DefValue) + for i, line := range strings.Split(flag.Usage, "\n") { + if i != 0 { + line = " " + line + } + fmt.Fprintln(writer, "\t", line) + } + } + }) + writer.Flush() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + if f.name == "" { + fmt.Fprintf(f.out(), "Usage:\n") + } else { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + } + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// FlagCount returns the number of flags that have been defined. +func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) } + +// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. 
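+// A flag is considered deprecated only when every one of its names starts
+// with '#'; a flag with at least one unprefixed name is counted here.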
+func (f *FlagSet) FlagCountUndeprecated() int { + count := 0 + for _, flag := range sortFlags(f.formal) { + for _, name := range flag.Names { + if name[0] != '#' { + count++ + break + } + } + } + return count +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + f.Var(newBoolValue(value, p), names, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, names []string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), names, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, names, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(names []string, value bool, usage string) *bool { + return CommandLine.Bool(names, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { + f.Var(newIntValue(value, p), names, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, names []string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), names, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(names []string, value int, usage string) *int { + p := new(int) + f.IntVar(p, names, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. 
+// The return value is the address of an int variable that stores the value of the flag. +func Int(names []string, value int, usage string) *int { + return CommandLine.Int(names, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + f.Var(newInt64Value(value, p), names, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, names []string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), names, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, names, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(names []string, value int64, usage string) *int64 { + return CommandLine.Int64(names, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + f.Var(newUintValue(value, p), names, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, names []string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), names, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, names, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(names []string, value uint, usage string) *uint { + return CommandLine.Uint(names, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + f.Var(newUint64Value(value, p), names, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, names []string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), names, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. 
+func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, names, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(names []string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(names, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { + f.Var(newStringValue(value, p), names, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, names []string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), names, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(names []string, value string, usage string) *string { + p := new(string) + f.StringVar(p, names, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(names []string, value string, usage string) *string { + return CommandLine.String(names, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + f.Var(newFloat64Value(value, p), names, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, names []string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), names, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, names, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(names []string, value float64, usage string) *float64 { + return CommandLine.Float64(names, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), names, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. 
+func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), names, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, names, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(names []string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(names, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, names []string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{names, usage, value, value.String()} + for _, name := range names { + name = strings.TrimPrefix(name, "#") + _, alreadythere := f.formal[name] + if alreadythere { + var msg string + if f.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) + } + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag + } +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, names []string, usage string) { + CommandLine.Var(value, names, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func trimQuotes(str string) string { + if len(str) == 0 { + return str + } + type quote struct { + start, end byte + } + + // All valid quote types. + quotes := []quote{ + // Double quotes + { + start: '"', + end: '"', + }, + + // Single quotes + { + start: '\'', + end: '\'', + }, + } + + for _, quote := range quotes { + // Only strip if outermost match. 
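+		// e.g. `'single'` becomes `single` and `"double"` becomes `double`,
+		// while mismatched quotes such as `'mixed"` are left untouched.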
+ if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + +// parseOne parses one flag. It reports whether a flag was seen. +func (f *FlagSet) parseOne() (bool, string, error) { + if len(f.args) == 0 { + return false, "", nil + } + s := f.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, "", nil + } + if s[1] == '-' && len(s) == 2 { // "--" terminates the flags + f.args = f.args[1:] + return false, "", nil + } + name := s[1:] + if len(name) == 0 || name[0] == '=' { + return false, "", f.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + f.args = f.args[1:] + has_value := false + value := "" + if i := strings.Index(name, "="); i != -1 { + value = trimQuotes(name[i+1:]) + has_value = true + name = name[:i] + } + + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "-help" || name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, "", ErrHelp + } + if len(name) > 0 && name[0] == '-' { + return false, "", f.failf("flag provided but not defined: -%s", name) + } + return false, name, ErrRetry + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if has_value { + if err := fv.Set(value); err != nil { + return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + fv.Set("true") + } + } else { + // It must have a value, which might be the next argument. + if !has_value && len(f.args) > 0 { + // value is the next arg + has_value = true + value, f.args = f.args[0], f.args[1:] + } + if !has_value { + return false, "", f.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + for i, n := range flag.Names { + if n == fmt.Sprintf("#%s", name) { + replacement := "" + for j := i; j < len(flag.Names); j++ { + if flag.Names[j][0] != '#' { + replacement = flag.Names[j] + break + } + } + if replacement != "" { + fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) + } else { + fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + } + } + } + return true, "", nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = arguments + for { + seen, name, err := f.parseOne() + if seen { + continue + } + if err == nil { + break + } + if err == ErrRetry { + if len(name) > 1 { + err = nil + for _, letter := range strings.Split(name, "") { + f.args = append([]string{"-" + letter}, f.args...) 
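+					// e.g. an unknown "-fd" is re-queued here as "-f" followed by
+					// "-d", so grouped single-letter flags are parsed one by one.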
+					seen2, _, err2 := f.parseOne()
+					if seen2 {
+						continue
+					}
+					if err2 != nil {
+						err = f.failf("flag provided but not defined: -%s", name)
+						break
+					}
+				}
+				if err == nil {
+					continue
+				}
+			} else {
+				err = f.failf("flag provided but not defined: -%s", name)
+			}
+		}
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+	return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.Parse(os.Args[1:])
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+	return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
+// methods of CommandLine.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+	f := &FlagSet{
+		name:          name,
+		errorHandling: errorHandling,
+	}
+	return f
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+	f.name = name
+	f.errorHandling = errorHandling
+}
diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go
new file mode 100644
index 00000000..340a1cb1
--- /dev/null
+++ b/pkg/mflag/flag_test.go
@@ -0,0 +1,506 @@
+// Copyright 2014 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mflag_test
+
+import (
+	"bytes"
+	"fmt"
+	. "github.com/docker/docker/pkg/mflag"
+	"os"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+)
+
+// ResetForTesting clears all flag state and sets the usage function as directed.
+// After calling ResetForTesting, parse errors in flag handling will not
+// exit the program.
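+// It replaces CommandLine with a fresh FlagSet that uses ContinueOnError.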
+func ResetForTesting(usage func()) { + CommandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, false, "bool value") + Int([]string{"test_int"}, 0, "int value") + Int64([]string{"test_int64"}, 0, "int64 value") + Uint([]string{"test_uint"}, 0, "uint value") + Uint64([]string{"test_uint64"}, 0, "uint64 value") + String([]string{"test_string"}, "0", "string value") + Float64([]string{"test_float64"}, 0, "float64 value") + Duration([]string{"test_duration"}, 0, "time.Duration value") + + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + m[name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", name) + } + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+ var flagNames []string + Visit(func(f *Flag) { + for _, name := range f.Names { + flagNames = append(flagNames, name) + } + }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestGet(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, true, "bool value") + Int([]string{"test_int"}, 1, "int value") + Int64([]string{"test_int64"}, 2, "int64 value") + Uint([]string{"test_uint"}, 3, "uint value") + Uint64([]string{"test_uint64"}, 4, "uint64 value") + String([]string{"test_string"}, "5", "string value") + Float64([]string{"test_float64"}, 6, "float64 value") + Duration([]string{"test_duration"}, 7, "time.Duration value") + + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + g, ok := f.Value.(Getter) + if !ok { + t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) + return + } + switch name { + case "test_bool": + ok = g.Get() == true + case "test_int": + ok = g.Get() == int(1) + case "test_int64": + ok = g.Get() == int64(2) + case "test_uint": + ok = g.Get() == uint(3) + case "test_uint64": + ok = g.Get() == uint64(4) + case "test_string": + ok = g.Get() == "5" + case "test_float64": + ok = g.Get() == float64(6) + case "test_duration": + ok = g.Get() == time.Duration(7) + } + if !ok { + t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) + } + } + } + } + VisitAll(visitor) +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + if CommandLine.Parse([]string{"-x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool([]string{"bool"}, false, "bool value") + bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + intFlag := f.Int([]string{"-int"}, 0, "int value") + int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") + uintFlag := f.Uint([]string{"uint"}, 0, "uint value") + uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") + stringFlag := f.String([]string{"string"}, "0", "string value") + singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") + doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") + mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") + mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") + nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") + nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") + float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") + durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") + extra := "one-extra-argument" + args := []string{ + "-bool", + "-bool2=true", + "--int", "22", + "--int64", "0x23", + "-uint", "24", + "--uint64", "25", + "-string", "hello", + "-squote='single'", + `-dquote="double"`, + `-mquote='mixed"`, + `-mquote2="mixed2'`, + `-nquote="'single nested'"`, + `-nquote2='"double nested"'`, + "-float64", "2718e28", + "-duration", "2m", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + 
}
+	if *intFlag != 22 {
+		t.Error("int flag should be 22, is ", *intFlag)
+	}
+	if *int64Flag != 0x23 {
+		t.Error("int64 flag should be 0x23, is ", *int64Flag)
+	}
+	if *uintFlag != 24 {
+		t.Error("uint flag should be 24, is ", *uintFlag)
+	}
+	if *uint64Flag != 25 {
+		t.Error("uint64 flag should be 25, is ", *uint64Flag)
+	}
+	if *stringFlag != "hello" {
+		t.Error("string flag should be `hello`, is ", *stringFlag)
+	}
+	if *singleQuoteFlag != "single" {
+		t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag)
+	}
+	if *doubleQuoteFlag != "double" {
+		t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag)
+	}
+	if *mixedQuoteFlag != `'mixed"` {
+		t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag)
+	}
+	if *mixed2QuoteFlag != `"mixed2'` {
+		t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag)
+	}
+	if *nestedQuoteFlag != "'single nested'" {
+		t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag)
+	}
+	if *nested2QuoteFlag != `"double nested"` {
+		t.Error("nested2 quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag)
+	}
+	if *float64Flag != 2718e28 {
+		t.Error("float64 flag should be 2718e28, is ", *float64Flag)
+	}
+	if *durationFlag != 2*time.Minute {
+		t.Error("duration flag should be 2m, is ", *durationFlag)
+	}
+	if len(f.Args()) != 1 {
+		t.Error("expected one argument, got", len(f.Args()))
+	} else if f.Args()[0] != extra {
+		t.Errorf("expected argument %q got %q", extra, f.Args()[0])
+	}
+}
+
+func testPanic(f *FlagSet, t *testing.T) {
+	f.Int([]string{"-int"}, 0, "int value")
+	if f.Parsed() {
+		t.Error("f.Parse() = true before Parse")
+	}
+	args := []string{
+		"-int", "21",
+	}
+	f.Parse(args)
+}
+
+func TestParsePanic(t *testing.T) {
+	ResetForTesting(func() {})
+	testPanic(CommandLine, t)
+}
+
+func TestParse(t *testing.T) {
+	ResetForTesting(func() { t.Error("bad parse") })
+	testParse(CommandLine, t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+	testParse(NewFlagSet("test", ContinueOnError), t)
+}
+
+// Declare a user-defined flag type.
+type flagVar []string
+
+func (f *flagVar) String() string {
+	return fmt.Sprint([]string(*f))
+}
+
+func (f *flagVar) Set(value string) error {
+	*f = append(*f, value)
+	return nil
+}
+
+func TestUserDefined(t *testing.T) {
+	var flags FlagSet
+	flags.Init("test", ContinueOnError)
+	var v flagVar
+	flags.Var(&v, []string{"v"}, "usage")
+	if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil {
+		t.Error(err)
+	}
+	if len(v) != 3 {
+		t.Fatal("expected 3 args; got ", len(v))
+	}
+	expect := "[1 2 3]"
+	if v.String() != expect {
+		t.Errorf("expected value %q got %q", expect, v.String())
+	}
+}
+
+// Declare a user-defined boolean flag type.
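+// A Value whose IsBoolFlag method returns true may be set with a bare
+// "-b" (parseOne supplies "true") as well as with an explicit "-b=false".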
+type boolFlagVar struct {
+	count int
+}
+
+func (b *boolFlagVar) String() string {
+	return fmt.Sprintf("%d", b.count)
+}
+
+func (b *boolFlagVar) Set(value string) error {
+	if value == "true" {
+		b.count++
+	}
+	return nil
+}
+
+func (b *boolFlagVar) IsBoolFlag() bool {
+	return b.count < 4
+}
+
+func TestUserDefinedBool(t *testing.T) {
+	var flags FlagSet
+	flags.Init("test", ContinueOnError)
+	var b boolFlagVar
+	var err error
+	flags.Var(&b, []string{"b"}, "usage")
+	if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil {
+		if b.count < 4 {
+			t.Error(err)
+		}
+	}
+
+	if b.count != 4 {
+		t.Errorf("want: %d; got: %d", 4, b.count)
+	}
+
+	if err == nil {
+		t.Error("expected error; got none")
+	}
+}
+
+func TestSetOutput(t *testing.T) {
+	var flags FlagSet
+	var buf bytes.Buffer
+	flags.SetOutput(&buf)
+	flags.Init("test", ContinueOnError)
+	flags.Parse([]string{"-unknown"})
+	if out := buf.String(); !strings.Contains(out, "-unknown") {
+		t.Errorf("expected output mentioning unknown; got %q", out)
+	}
+}
+
+// This tests that one can reset the flags. This still works but not well, and is
+// superseded by FlagSet.
+func TestChangingArgs(t *testing.T) {
+	ResetForTesting(func() { t.Fatal("bad parse") })
+	oldArgs := os.Args
+	defer func() { os.Args = oldArgs }()
+	os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"}
+	before := Bool([]string{"before"}, false, "")
+	if err := CommandLine.Parse(os.Args[1:]); err != nil {
+		t.Fatal(err)
+	}
+	cmd := Arg(0)
+	os.Args = Args()
+	after := Bool([]string{"after"}, false, "")
+	Parse()
+	args := Args()
+
+	if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
+		t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
+	}
+}
+
+// Test that -help invokes the usage message and returns ErrHelp.
+func TestHelp(t *testing.T) {
+	var helpCalled = false
+	fs := NewFlagSet("help test", ContinueOnError)
+	fs.Usage = func() { helpCalled = true }
+	var flag bool
+	fs.BoolVar(&flag, []string{"flag"}, false, "regular flag")
+	// Regular flag invocation should work
+	err := fs.Parse([]string{"-flag=true"})
+	if err != nil {
+		t.Fatal("expected no error; got ", err)
+	}
+	if !flag {
+		t.Error("flag was not set by -flag")
+	}
+	if helpCalled {
+		t.Error("help called for regular flag")
+		helpCalled = false // reset for next test
+	}
+	// Help flag should work as expected.
+	err = fs.Parse([]string{"-help"})
+	if err == nil {
+		t.Fatal("error expected")
+	}
+	if err != ErrHelp {
+		t.Fatal("expected ErrHelp; got ", err)
+	}
+	if !helpCalled {
+		t.Fatal("help was not called")
+	}
+	// If we define a help flag, that should override.
+	var help bool
+	fs.BoolVar(&help, []string{"help"}, false, "help flag")
+	helpCalled = false
+	err = fs.Parse([]string{"-help"})
+	if err != nil {
+		t.Fatal("expected no error for defined -help; got ", err)
+	}
+	if helpCalled {
+		t.Fatal("help was called; should not have been for defined help flag")
+	}
+}
+
+// Test the flag count functions.
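+// Names registered with a leading '#' are deprecated aliases: they are
+// hidden from PrintDefaults and FlagCountUndeprecated, but parseOne still
+// accepts them and prints a deprecation warning.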
+func TestFlagCounts(t *testing.T) {
+	fs := NewFlagSet("help test", ContinueOnError)
+	var flag bool
+	fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag")
+	fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag")
+	fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag")
+	fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag")
+	fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag")
+	fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag")
+
+	if fs.FlagCount() != 6 {
+		t.Fatal("FlagCount wrong. ", fs.FlagCount())
+	}
+	if fs.FlagCountUndeprecated() != 4 {
+		t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated())
+	}
+	if fs.NFlag() != 0 {
+		t.Fatal("NFlag wrong. ", fs.NFlag())
+	}
+	err := fs.Parse([]string{"-fd", "-g", "-flag4"})
+	if err != nil {
+		t.Fatal("expected no error; got ", err)
+	}
+	if fs.NFlag() != 4 {
+		t.Fatal("NFlag wrong. ", fs.NFlag())
+	}
+}
+
+// Regression test for a bug in sortFlags
+func TestSortFlags(t *testing.T) {
+	fs := NewFlagSet("help TestSortFlags", ContinueOnError)
+
+	var err error
+
+	var b bool
+	fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage")
+
+	err = fs.Parse([]string{"--banana=true"})
+	if err != nil {
+		t.Fatal("expected no error; got ", err)
+	}
+
+	count := 0
+
+	fs.VisitAll(func(flag *Flag) {
+		count++
+		if flag == nil {
+			t.Fatal("VisitAll should not return a nil flag")
+		}
+	})
+	flagcount := fs.FlagCount()
+	if flagcount != count {
+		t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count)
+	}
+	// Make sure it's idempotent
+	if flagcount != fs.FlagCount() {
+		t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount())
+	}
+
+	count = 0
+	fs.Visit(func(flag *Flag) {
+		count++
+		if flag == nil {
+			t.Fatal("Visit should not return a nil flag")
+		}
+	})
+	nflag := fs.NFlag()
+	if nflag != count {
+		t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count)
+	}
+	if nflag != fs.NFlag() {
+		t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag())
+	}
+}
diff --git a/pkg/mount/MAINTAINERS b/pkg/mount/MAINTAINERS
new file mode 100644
index 00000000..1e998f8a
--- /dev/null
+++ b/pkg/mount/MAINTAINERS
@@ -0,0 +1 @@
+Michael Crosby <michael@docker.com> (@crosbymichael)
diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go
new file mode 100644
index 00000000..742698e8
--- /dev/null
+++ b/pkg/mount/flags.go
@@ -0,0 +1,62 @@
+package mount
+
+import (
+	"strings"
+)
+
+// Parse fstab-style mount options into mount() flags
+// and device-specific data
+func parseOptions(options string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+
+	flags := map[string]struct {
+		clear bool
+		flag  int
+	}{
+		"defaults":      {false, 0},
+		"ro":            {false, RDONLY},
+		"rw":            {true, RDONLY},
+		"suid":          {true, NOSUID},
+		"nosuid":        {false, NOSUID},
+		"dev":           {true, NODEV},
+		"nodev":         {false, NODEV},
+		"exec":          {true, NOEXEC},
+		"noexec":        {false, NOEXEC},
+		"sync":          {false, SYNCHRONOUS},
+		"async":         {true, SYNCHRONOUS},
+		"dirsync":       {false, DIRSYNC},
+		"remount":       {false, REMOUNT},
+		"mand":          {false, MANDLOCK},
+		"nomand":        {true, MANDLOCK},
+		"atime":         {true, NOATIME},
+		"noatime":       {false, NOATIME},
+		"diratime":      {true, NODIRATIME},
+		"nodiratime":    {false, NODIRATIME},
+		"bind":          {false, BIND},
+		"rbind":         {false, RBIND},
+		"private":       {false, PRIVATE},
+		"relatime":      {false, RELATIME},
+		"norelatime":    {true, RELATIME},
+		"strictatime":   {false, STRICTATIME},
+		"nostrictatime": {true, STRICTATIME},
+	}
+
+	for _, o := 
range strings.Split(options, ",") {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				flag &= ^f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go
new file mode 100644
index 00000000..4ddf4d70
--- /dev/null
+++ b/pkg/mount/flags_freebsd.go
@@ -0,0 +1,28 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+	RDONLY      = C.MNT_RDONLY
+	NOSUID      = C.MNT_NOSUID
+	NOEXEC      = C.MNT_NOEXEC
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+	NOATIME     = C.MNT_NOATIME
+
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	PRIVATE     = 0
+	RBIND       = 0
+	RELATIVE    = 0
+	RELATIME    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+)
diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go
new file mode 100644
index 00000000..0bb47d8c
--- /dev/null
+++ b/pkg/mount/flags_linux.go
@@ -0,0 +1,23 @@
+package mount
+
+import (
+	"syscall"
+)
+
+const (
+	RDONLY      = syscall.MS_RDONLY
+	NOSUID      = syscall.MS_NOSUID
+	NODEV       = syscall.MS_NODEV
+	NOEXEC      = syscall.MS_NOEXEC
+	SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+	DIRSYNC     = syscall.MS_DIRSYNC
+	REMOUNT     = syscall.MS_REMOUNT
+	MANDLOCK    = syscall.MS_MANDLOCK
+	NOATIME     = syscall.MS_NOATIME
+	NODIRATIME  = syscall.MS_NODIRATIME
+	BIND        = syscall.MS_BIND
+	RBIND       = syscall.MS_BIND | syscall.MS_REC
+	PRIVATE     = syscall.MS_PRIVATE
+	RELATIME    = syscall.MS_RELATIME
+	STRICTATIME = syscall.MS_STRICTATIME
+)
diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go
new file mode 100644
index 00000000..5a141088
--- /dev/null
+++ b/pkg/mount/flags_unsupported.go
@@ -0,0 +1,22 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+const (
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NOATIME     = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	NOEXEC      = 0
+	NOSUID      = 0
+	PRIVATE     = 0
+	RBIND       = 0
+	RELATIME    = 0
+	RELATIVE    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+	SYNCHRONOUS = 0
+	RDONLY      = 0
+)
diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go
new file mode 100644
index 00000000..5ca73160
--- /dev/null
+++ b/pkg/mount/mount.go
@@ -0,0 +1,70 @@
+package mount
+
+import (
+	"time"
+)
+
+func GetMounts() ([]*MountInfo, error) {
+	return parseMountTable()
+}
+
+// Looks at /proc/self/mountinfo to determine whether the specified
+// mountpoint has been mounted
+func Mounted(mountpoint string) (bool, error) {
+	entries, err := parseMountTable()
+	if err != nil {
+		return false, err
+	}
+
+	// Search the table for the mountpoint
+	for _, e := range entries {
+		if e.Mountpoint == mountpoint {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// Mount the device at the target path with the specified options, but
+// only if the target is not already mounted
+// Options must be specified in fstab style
+func Mount(device, target, mType, options string) error {
+	flag, _ := parseOptions(options)
+	if flag&REMOUNT != REMOUNT {
+		if mounted, err := Mounted(target); err != nil || mounted {
+			return err
+		}
+	}
+	return ForceMount(device, target, mType, options)
+}
+
+// Mount the device at the target path with the specified options,
+// regardless of whether the target is already mounted
+// Options must be specified in fstab style
+func ForceMount(device, target, mType, options string) error {
+	flag, data := parseOptions(options)
+	if err := mount(device, target, mType, uintptr(flag), data); err != nil {
+		return err
+	}
+	
return nil
+}
+
+// Unmount the target only if it is mounted
+func Unmount(target string) error {
+	if mounted, err := Mounted(target); err != nil || !mounted {
+		return err
+	}
+	return ForceUnmount(target)
+}
+
+// Unmount the target regardless of whether it is mounted or not
+func ForceUnmount(target string) (err error) {
+	// Simple retry logic for unmount
+	for i := 0; i < 10; i++ {
+		if err = unmount(target, 0); err == nil {
+			return nil
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	return
+}
diff --git a/pkg/mount/mount_test.go b/pkg/mount/mount_test.go
new file mode 100644
index 00000000..5c7f1b86
--- /dev/null
+++ b/pkg/mount/mount_test.go
@@ -0,0 +1,137 @@
+package mount
+
+import (
+	"os"
+	"path"
+	"testing"
+)
+
+func TestMountOptionsParsing(t *testing.T) {
+	options := "noatime,ro,size=10k"
+
+	flag, data := parseOptions(options)
+
+	if data != "size=10k" {
+		t.Fatalf("Expected size=10k got %s", data)
+	}
+
+	expectedFlag := NOATIME | RDONLY
+
+	if flag != expectedFlag {
+		t.Fatalf("Expected %d got %d", expectedFlag, flag)
+	}
+}
+
+func TestMounted(t *testing.T) {
+	tmp := path.Join(os.TempDir(), "mount-tests")
+	if err := os.MkdirAll(tmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	var (
+		sourceDir  = path.Join(tmp, "source")
+		targetDir  = path.Join(tmp, "target")
+		sourcePath = path.Join(sourceDir, "file.txt")
+		targetPath = path.Join(targetDir, "file.txt")
+	)
+
+	os.Mkdir(sourceDir, 0777)
+	os.Mkdir(targetDir, 0777)
+
+	f, err := os.Create(sourcePath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.WriteString("hello")
+	f.Close()
+
+	f, err = os.Create(targetPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := Unmount(targetDir); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	mounted, err := Mounted(targetDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !mounted {
+		t.Fatalf("Expected %s to be mounted", targetDir)
+	}
+	if _, err := os.Stat(targetDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMountReadonly(t *testing.T) {
+	tmp := path.Join(os.TempDir(), "mount-tests")
+	if err := os.MkdirAll(tmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	var (
+		sourceDir  = path.Join(tmp, "source")
+		targetDir  = path.Join(tmp, "target")
+		sourcePath = path.Join(sourceDir, "file.txt")
+		targetPath = path.Join(targetDir, "file.txt")
+	)
+
+	os.Mkdir(sourceDir, 0777)
+	os.Mkdir(targetDir, 0777)
+
+	f, err := os.Create(sourcePath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.WriteString("hello")
+	f.Close()
+
+	f, err = os.Create(targetPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := Unmount(targetDir); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
+	if err == nil {
+		t.Fatal("Should not be able to open a ro file as rw")
+	}
+}
+
+func TestGetMounts(t *testing.T) {
+	mounts, err := GetMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	root := false
+	for _, entry := range mounts {
+		if entry.Mountpoint == "/" {
+			root = true
+		}
+	}
+
+	if !root {
+		t.Fatal("at least / should be mounted")
+	}
+}
diff --git a/pkg/mount/mounter_freebsd.go b/pkg/mount/mounter_freebsd.go
new file mode 100644
index 00000000..bb870e6f
--- /dev/null
+++ b/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/pkg/mount/mounter_linux.go b/pkg/mount/mounter_linux.go new file mode 100644 index 00000000..dd4280c7 --- /dev/null +++ b/pkg/mount/mounter_linux.go @@ -0,0 +1,21 @@ +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000..eb93365e --- /dev/null +++ b/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go new file mode 100644 index 00000000..78b83ced --- /dev/null +++ b/pkg/mount/mountinfo.go @@ -0,0 +1,7 @@ +package mount + +type MountInfo struct { + Id, Parent, Major, Minor int + Root, Mountpoint, Opts string + Fstype, Source, VfsOpts string +} diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 00000000..a16bdb84 --- /dev/null +++ b/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,38 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*MountInfo + for _, entry := range entries { + var mountinfo MountInfo + mountinfo.Mountpoint = 
C.GoString(&entry.f_mntonname[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000..84bf5516 --- /dev/null +++ b/pkg/mount/mountinfo_linux.go @@ -0,0 +1,74 @@ +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s " +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*MountInfo, error) { + var ( + s = bufio.NewScanner(r) + out = []*MountInfo{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &MountInfo{} + text = s.Text() + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.Id, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
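+		// For the documented sample line
+		//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+		// the fields after " - " yield Fstype "ext3", Source "/dev/root"
+		// and VfsOpts "rw,errors=continue".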
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} diff --git a/pkg/mount/mountinfo_linux_test.go b/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 00000000..3c214476 --- /dev/null +++ b/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,448 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 
- rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 
- ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys 
rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / 
/var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / 
/var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / 
/var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / 
/var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / 
/var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf 
/var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd 
rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none 
rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none 
rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none 
rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none 
rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none 
rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none 
rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none 
rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} diff --git a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 00000000..352336b9 --- /dev/null +++ b/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*MountInfo, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go new file mode 100644 index 00000000..ebb5850b --- /dev/null +++ b/pkg/namesgenerator/names-generator.go @@ -0,0 +1,92 @@ +package namesgenerator + +import ( + "fmt" + "math/rand" + "time" +) + +var ( + left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", 
"clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} + // Docker 0.7.x generates names from notable scientists and hackers. + // + // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. http://en.wikipedia.org/wiki/Ada_Yonath + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. http://en.wikipedia.org/wiki/Adele_Goldstine + // Alan Turing was a founding father of computer science. http://en.wikipedia.org/wiki/Alan_Turing. + // Albert Einstein invented the general theory of relativity. http://en.wikipedia.org/wiki/Albert_Einstein + // Ambroise Pare invented modern surgery. http://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. http://en.wikipedia.org/wiki/Archimedes + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. http://en.wikipedia.org/wiki/Barbara_McClintock + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. + // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. + // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson + // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell + // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + // Enrico Fermi invented the first nuclear reactor. http://en.wikipedia.org/wiki/Enrico_Fermi. + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephon switching method. http://en.wikipedia.org/wiki/Erna_Schneider_Hoover + // Euclid invented geometry. http://en.wikipedia.org/wiki/Euclid + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper + // Henry Poincare made fundamental contributions in several fields of mathematics. 
http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia + // Isaac Newton invented classical mechanics and modern optics. http://en.wikipedia.org/wiki/Isaac_Newton + // Jane Colden - American botanist widely considered the first female American botanist - http://en.wikipedia.org/wiki/Jane_Colden + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - http://en.wikipedia.org/wiki/Jane_Goodall + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. http://en.wikipedia.org/wiki/Jean_Bartik + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. http://en.wikipedia.org/wiki/Jean_E._Sammet + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - http://en.wikipedia.org/wiki/Johanna_Mestorf + // John McCarthy invented LISP: http://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - http://en.wikipedia.org/wiki/June_Almeida + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. http://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + // Leonardo Da Vinci invented too many things to list here. http://en.wikipedia.org/wiki/Leonardo_da_Vinci. + // Linus Torvalds invented Linux and Git. http://en.wikipedia.org/wiki/Linus_Torvalds + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - http://en.wikipedia.org/wiki/Lise_Meitner + // Louis Pasteur discovered vaccination, fermentation and pasteurization. http://en.wikipedia.org/wiki/Louis_Pasteur. + // Malcolm McLean invented the modern shipping container: http://en.wikipedia.org/wiki/Malcom_McLean + // Maria Ardinghelli - Italian translator, mathematician and physicist - http://en.wikipedia.org/wiki/Maria_Ardinghelli + // Maria Kirch - German astronomer and first woman to discover a comet - http://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - http://en.wikipedia.org/wiki/Maria_Mayer + // Marie Curie discovered radioactivity. http://en.wikipedia.org/wiki/Marie_Curie. + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - http://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla + // Pierre de Fermat pioneered several aspects of modern mathematics.
http://en.wikipedia.org/wiki/Pierre_de_Fermat + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman + // Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. http://en.wikiquote.org/wiki/Richard_Stallman + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, UTF-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking + // Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak + // Werner Heisenberg was a founding father of quantum mechanics. http://en.wikipedia.org/wiki/Werner_Heisenberg + // William Shockley, Walter Houser Brattain and John Bardeen co-invented the transistor (thanks Brian Goff).
+ // http://en.wikipedia.org/wiki/John_Bardeen + // http://en.wikipedia.org/wiki/Walter_Houser_Brattain + // http://en.wikipedia.org/wiki/William_Shockley + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yonath"} +) + +func GetRandomName(retry int) string { + rand.Seed(time.Now().UnixNano()) + +begin: + name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) + } + return name +} diff --git a/pkg/namesgenerator/names-generator_test.go b/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 00000000..2652d42a --- /dev/null +++ b/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,23 @@ +package namesgenerator + +import ( + "testing" +) + +// Make sure the generated names are awesome +func TestGenerateAwesomeNames(t *testing.T) { + name := GetRandomName(0) + if !isAwesome(name) { + t.Fatalf("Generated name '%s' is not awesome.", name) + } +} + +// To be awesome, a container name must involve cool inventors, be easy to remember, +// be at least mildly funny, and always be politically correct for enterprise adoption. 
+func isAwesome(name string) bool { + coolInventorNames := true + easyToRemember := true + mildlyFunnyOnOccasion := true + politicallyCorrect := true + return coolInventorNames && easyToRemember && mildlyFunnyOnOccasion && politicallyCorrect +} diff --git a/pkg/networkfs/MAINTAINERS b/pkg/networkfs/MAINTAINERS new file mode 100644 index 00000000..e0f18f14 --- /dev/null +++ b/pkg/networkfs/MAINTAINERS @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/networkfs/etchosts/etchosts.go new file mode 100644 index 00000000..6cf29b04 --- /dev/null +++ b/pkg/networkfs/etchosts/etchosts.go @@ -0,0 +1,53 @@ +package etchosts + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" +) + +var defaultContent = map[string]string{ + "localhost": "127.0.0.1", + "localhost ip6-localhost ip6-loopback": "::1", + "ip6-localnet": "fe00::0", + "ip6-mcastprefix": "ff00::0", + "ip6-allnodes": "ff02::1", + "ip6-allrouters": "ff02::2", +} + +func Build(path, IP, hostname, domainname string, extraContent *map[string]string) error { + content := bytes.NewBuffer(nil) + if IP != "" { + if domainname != "" { + content.WriteString(fmt.Sprintf("%s\t%s.%s %s\n", IP, hostname, domainname, hostname)) + } else { + content.WriteString(fmt.Sprintf("%s\t%s\n", IP, hostname)) + } + } + + for hosts, ip := range defaultContent { + if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + return err + } + } + + if extraContent != nil { + for hosts, ip := range *extraContent { + if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + return err + } + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} + +func Update(path, IP, hostname string) error { + old, err := ioutil.ReadFile(path) + if err != nil { + return err + } + var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)", regexp.QuoteMeta(hostname))) + return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2")), 0644) +} diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/networkfs/etchosts/etchosts_test.go new file mode 100644 index 00000000..05a4f447 --- /dev/null +++ b/pkg/networkfs/etchosts/etchosts_test.go @@ -0,0 +1,108 @@ +package etchosts + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestBuildHostnameDomainname(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildHostname(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "10.11.12.13", "testhostname", "", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildNoIP(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "", "testhostname", "", nil) + if err != 
nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := ""; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestUpdate(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + if err := Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil); err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + + if err := Update(file.Name(), "1.1.1.1", "testhostname"); err != nil { + t.Fatal(err) + } + + content, err = ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "1.1.1.1\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} diff --git a/pkg/networkfs/resolvconf/resolvconf.go b/pkg/networkfs/resolvconf/resolvconf.go new file mode 100644 index 00000000..9165caea --- /dev/null +++ b/pkg/networkfs/resolvconf/resolvconf.go @@ -0,0 +1,92 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" +) + +var ( + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) +) + +func Get() ([]byte, error) { + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, err + } + return resolv, nil +} + +// getLines parses input into lines and strips away comments. +func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + var ns = nsRegexp.FindSubmatch(line) + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf) { + nameservers = append(nameservers, nameserver+"/32") + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one are returned. 
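+// For example (an illustrative sketch of the last-line-wins behavior): +// +// GetSearchDomains([]byte("search a.example\nsearch b.example c.example")) +// // returns []string{"b.example", "c.example"}: only the last search line counts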
+func GetSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +func Build(path string, dns, dnsSearch []string) error { + content := bytes.NewBuffer(nil) + for _, dns := range dns { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return err + } + } + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." { + if _, err := content.WriteString("search " + searchString + "\n"); err != nil { + return err + } + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} diff --git a/pkg/networkfs/resolvconf/resolvconf_test.go b/pkg/networkfs/resolvconf/resolvconf_test.go new file mode 100644 index 00000000..6187acba --- /dev/null +++ b/pkg/networkfs/resolvconf/resolvconf_test.go @@ -0,0 +1,158 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestGet(t *testing.T) { + resolvConfUtils, err := Get() + if err != nil { + t.Fatal(err) + } + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + t.Fatal(err) + } + if string(resolvConfUtils) != string(resolvConfSystem) { + t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") + } +} + +func TestGetNameservers(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4", "40.3.200.10"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, + } { + test := GetNameservers([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func TestGetNameserversAsCIDR(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, + } { + test := GetNameserversAsCIDR([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. 
Input: %s", test, result, resolv) + } + } +} + +func TestGetSearchDomains(t *testing.T) { + for resolv, result := range map[string][]string{ + `search example.com`: {"example.com"}, + `search example.com # ignored`: {"example.com"}, + ` search example.com `: {"example.com"}, + ` search example.com # ignored`: {"example.com"}, + `search foo.example.com example.com`: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, + ``: {}, + `# ignored`: {}, + `nameserver 1.2.3.4 +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search dup1.example.com dup2.example.com +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search foo.example.com example.com +nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, + } { + test := GetSearchDomains([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func strSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + + return true +} + +func TestBuild(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildWithZeroLengthDomainSearch(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) { + t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content) + } +} diff --git a/pkg/parsers/MAINTAINERS b/pkg/parsers/MAINTAINERS new file mode 100644 index 00000000..8c890253 --- /dev/null +++ b/pkg/parsers/MAINTAINERS @@ -0,0 +1 @@ +Erik Hollensbe (@erikh) diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go new file mode 100644 index 00000000..27c7132e --- /dev/null +++ b/pkg/parsers/filters/parse.go @@ -0,0 +1,63 @@ +package filters + +import ( + "encoding/json" + "errors" + "strings" +) + +type Args map[string][]string + +// Parse the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. 
+func ParseFlag(arg string, prev Args) (Args, error) { + var filters Args = prev + if prev == nil { + filters = Args{} + } + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrorBadFormat + } + + f := strings.SplitN(arg, "=", 2) + filters[f[0]] = append(filters[f[0]], f[1]) + + return filters, nil +} + +var ErrorBadFormat = errors.New("bad format of filter (expected name=value)") + +// packs the Args into a string for easy transport from client to server +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if len(a) == 0 { + return "", nil + } + + buf, err := json.Marshal(a) + if err != nil { + return "", err + } + return string(buf), nil +} + +// unpacks the filter Args +func FromParam(p string) (Args, error) { + args := Args{} + if len(p) == 0 { + return args, nil + } + err := json.Unmarshal([]byte(p), &args) + if err != nil { + return nil, err + } + return args, nil +} diff --git a/pkg/parsers/filters/parse_test.go b/pkg/parsers/filters/parse_test.go new file mode 100644 index 00000000..a2483502 --- /dev/null +++ b/pkg/parsers/filters/parse_test.go @@ -0,0 +1,78 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": []string{"ubuntu*", "*untu"}, + } + + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + for key, vals := range v1 { + if _, ok := a[key]; !ok { + t.Errorf("could not find key %s in original set", key) + } + sort.Strings(vals) + sort.Strings(a[key]) + if len(vals) != len(a[key]) { + t.Errorf("value lengths ought to match") + continue + } + for i := range vals { + if vals[i] != a[key][i] { + t.Errorf("expected %s, but got %s", a[key][i], vals[i]) + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..70d09003 --- /dev/null +++ b/pkg/parsers/kernel/kernel.go @@ -0,0 +1,93 @@ +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +type KernelVersionInfo struct { + Kernel int + Major int + Minor int + Flavor string +} + +func (k *KernelVersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// Compare two KernelVersionInfo structs. 
+// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b *KernelVersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +func GetKernelVersion() (*KernelVersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +func ParseRelease(release string) (*KernelVersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &KernelVersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/pkg/parsers/kernel/kernel_test.go b/pkg/parsers/kernel/kernel_test.go new file mode 100644 index 00000000..e211a63b --- /dev/null +++ b/pkg/parsers/kernel/kernel_test.go @@ -0,0 +1,61 @@ +package kernel + +import ( + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { + var ( + a *KernelVersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) +} + +func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) +} diff --git a/pkg/parsers/kernel/uname_linux.go b/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..8ca814c1 --- /dev/null +++ b/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,16 @@ +package kernel + +import ( + "syscall" +) + +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/pkg/parsers/kernel/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..00c54225 --- /dev/null +++ b/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/pkg/parsers/operatingsystem/operatingsystem.go b/pkg/parsers/operatingsystem/operatingsystem.go new file mode 100644 index 00000000..af185f9f --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem.go @@ -0,0 +1,40 @@ +package operatingsystem + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" +) + +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { + b = b[i+13:] + return string(b[:bytes.IndexByte(b, '"')]), nil + } + return "", errors.New("PRETTY_NAME not found") +} + +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { + return true, nil + } + } + return false, nil +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_test.go b/pkg/parsers/operatingsystem/operatingsystem_test.go new file mode 100644 index 00000000..d264b35f --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -0,0 +1,123 @@ +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var ( + backup = etcOsRelease + ubuntuTrusty = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" 
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + gentoo = []byte(`NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`) + noPrettyName = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + ) + + dir := os.TempDir() + defer func() { + etcOsRelease = backup + os.RemoveAll(dir) + }() + + etcOsRelease = filepath.Join(dir, "etcOsRelease") + for expect, osRelease := range map[string][]byte{ + "Ubuntu 14.04 LTS": ubuntuTrusty, + "Gentoo/Linux": gentoo, + "": noPrettyName, + } { + if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if s != expect { + if expect == "" { + t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) + } else { + t.Fatalf("Expected '%s', but got '%s'. Err=%v", expect, s, err) + } + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + defer func() { + proc1Cgroup = backup + os.RemoveAll(dir) + }() + + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go new file mode 100644 index 00000000..e6e3718b --- /dev/null +++ b/pkg/parsers/parsers.go @@ -0,0 +1,110 @@ +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// FIXME: Change this not to receive default value as parameter +func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { + var ( + proto string + host string + port int + ) + addr = strings.TrimSpace(addr) + switch { + case addr == "tcp://": + return "", fmt.Errorf("Invalid bind address format: %s", addr) + case strings.HasPrefix(addr, "unix://"): + proto = "unix" + addr = strings.TrimPrefix(addr, "unix://") + if addr == 
"" { + addr = defaultUnix + } + case strings.HasPrefix(addr, "tcp://"): + proto = "tcp" + addr = strings.TrimPrefix(addr, "tcp://") + case strings.HasPrefix(addr, "fd://"): + return addr, nil + case addr == "": + proto = "unix" + addr = defaultUnix + default: + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid bind address protocol: %s", addr) + } + proto = "tcp" + } + + if proto != "unix" && strings.Contains(addr, ":") { + hostParts := strings.Split(addr, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + if hostParts[0] != "" { + host = hostParts[0] + } else { + host = defaultHost + } + + if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { + port = p + } else { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + + } else if proto == "tcp" && !strings.Contains(addr, ":") { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } else { + host = addr + } + if proto == "unix" { + return fmt.Sprintf("%s://%s", proto, host), nil + } + return fmt.Sprintf("%s://%s:%d", proto, host, port), nil +} + +// Get a repos name and returns the right reposName + tag +// The tag can be confusing because of a port in a repository name. +// Ex: localhost.localdomain:5000/samalba/hipache:latest +func ParseRepositoryTag(repos string) (string, string) { + n := strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go new file mode 100644 index 00000000..12b8df57 --- /dev/null +++ b/pkg/parsers/parsers_test.go @@ -0,0 +1,83 @@ +package parsers + +import ( + "testing" +) + +func TestParseHost(t *testing.T) { + var ( + defaultHttpHost = "127.0.0.1" + defaultUnix = "/var/run/docker.sock" + ) + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { + t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { + t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { + t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { + t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { + t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. 
Got %s", addr) + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } +} + +func TestParsePortMapping(t *testing.T) { + data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") + if err != nil { + t.Fatal(err) + } + + if len(data) != 3 { + t.FailNow() + } + if data["ip"] != "192.168.1.1" { + t.Fail() + } + if data["public"] != "80" { + t.Fail() + } + if data["private"] != "8080" { + t.Fail() + } +} diff --git a/pkg/pools/pools.go b/pkg/pools/pools.go new file mode 100644 index 00000000..5338a0cf --- /dev/null +++ b/pkg/pools/pools.go @@ -0,0 +1,111 @@ +// +build go1.3 + +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // Pool which returns bufio.Reader with a 32K buffer + BufioReader32KPool *BufioReaderPool + // Pool which returns bufio.Writer with a 32K buffer + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +type BufioReaderPool struct { + pool sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. 
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.WriteCloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/pkg/pools/pools_nopool.go b/pkg/pools/pools_nopool.go new file mode 100644 index 00000000..48903c23 --- /dev/null +++ b/pkg/pools/pools_nopool.go @@ -0,0 +1,73 @@ +// +build !go1.3 + +package pools + +import ( + "bufio" + "io" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + BufioReader32KPool *BufioReaderPool + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +type BufioReaderPool struct { + size int +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{size: size} +} + +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + return bufio.NewReaderSize(r, bufPool.size) +} + +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) +} + +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + return readCloser.Close() + } + return nil + }) +} + +type BufioWriterPool struct { + size int +} + +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{size: size} +} + +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + return bufio.NewWriterSize(w, bufPool.size) +} + +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) +} + +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + return writeCloser.Close() + } + return nil + }) +} diff --git a/pkg/promise/promise.go b/pkg/promise/promise.go new file mode 100644 index 00000000..dd52b908 --- /dev/null +++ b/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it calls a function in a goroutine, +// and returns a channel which will later return the 
function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/pkg/proxy/MAINTAINERS b/pkg/proxy/MAINTAINERS new file mode 100644 index 00000000..8c890253 --- /dev/null +++ b/pkg/proxy/MAINTAINERS @@ -0,0 +1 @@ +Erik Hollensbe (@erikh) diff --git a/pkg/proxy/network_proxy_test.go b/pkg/proxy/network_proxy_test.go new file mode 100644 index 00000000..9e382567 --- /dev/null +++ b/pkg/proxy/network_proxy_test.go @@ -0,0 +1,216 @@ +package proxy + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "testing" + "time" +) + +var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo") +var testBufSize = len(testBuf) + +type EchoServer interface { + Run() + Close() + LocalAddr() net.Addr +} + +type TCPEchoServer struct { + listener net.Listener + testCtx *testing.T +} + +type UDPEchoServer struct { + conn net.PacketConn + testCtx *testing.T +} + +func NewEchoServer(t *testing.T, proto, address string) EchoServer { + var server EchoServer + if strings.HasPrefix(proto, "tcp") { + listener, err := net.Listen(proto, address) + if err != nil { + t.Fatal(err) + } + server = &TCPEchoServer{listener: listener, testCtx: t} + } else { + socket, err := net.ListenPacket(proto, address) + if err != nil { + t.Fatal(err) + } + server = &UDPEchoServer{conn: socket, testCtx: t} + } + return server +} + +func (server *TCPEchoServer) Run() { + go func() { + for { + client, err := server.listener.Accept() + if err != nil { + return + } + go func(client net.Conn) { + if _, err := io.Copy(client, client); err != nil { + server.testCtx.Logf("can't echo to the client: %v\n", err.Error()) + } + client.Close() + }(client) + } + }() +} + +func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() } +func (server *TCPEchoServer) Close() { server.listener.Close() } + +func (server *UDPEchoServer) Run() { + go func() { + readBuf := make([]byte, 1024) + for { + read, from, err := server.conn.ReadFrom(readBuf) + if err != nil { + return + } + for i := 0; i != read; { + written, err := server.conn.WriteTo(readBuf[i:read], from) + if err != nil { + break + } + i += written + } + } + }() +} + +func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() } +func (server *UDPEchoServer) Close() { server.conn.Close() } + +func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) { + defer proxy.Close() + go proxy.Run() + client, err := net.Dial(proto, addr) + if err != nil { + t.Fatalf("Can't connect to the proxy: %v", err) + } + defer client.Close() + client.SetDeadline(time.Now().Add(10 * time.Second)) + if _, err = client.Write(testBuf); err != nil { + t.Fatal(err) + } + recvBuf := make([]byte, testBufSize) + if _, err = client.Read(recvBuf); err != nil { + t.Fatal(err) + } + if !bytes.Equal(testBuf, recvBuf) { + t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) + } +} + +func testProxy(t *testing.T, proto string, proxy Proxy) { + testProxyAt(t, proto, proxy, proxy.FrontendAddr().String()) +} + +func TestTCP4Proxy(t *testing.T) { + backend := NewEchoServer(t, "tcp", "127.0.0.1:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "tcp", proxy) +} + +func TestTCP6Proxy(t *testing.T) { + backend := NewEchoServer(t, "tcp", "[::1]:0") + defer backend.Close() + backend.Run() + 
frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "tcp", proxy) +} + +func TestTCPDualStackProxy(t *testing.T) { + // If I understand `godoc -src net favoriteAddrFamily` (used by the + // net.Listen* functions) correctly this should work, but it doesn't. + t.Skip("No support for dual stack yet") + backend := NewEchoServer(t, "tcp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + ipv4ProxyAddr := &net.TCPAddr{ + IP: net.IPv4(127, 0, 0, 1), + Port: proxy.FrontendAddr().(*net.TCPAddr).Port, + } + testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String()) +} + +func TestUDP4Proxy(t *testing.T) { + backend := NewEchoServer(t, "udp", "127.0.0.1:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "udp", proxy) +} + +func TestUDP6Proxy(t *testing.T) { + backend := NewEchoServer(t, "udp", "[::1]:0") + defer backend.Close() + backend.Run() + frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0} + proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) + if err != nil { + t.Fatal(err) + } + testProxy(t, "udp", proxy) +} + +func TestUDPWriteError(t *testing.T) { + frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} + // Hopefully, this port will be free: */ + backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587} + proxy, err := NewProxy(frontendAddr, backendAddr) + if err != nil { + t.Fatal(err) + } + defer proxy.Close() + go proxy.Run() + client, err := net.Dial("udp", "127.0.0.1:25587") + if err != nil { + t.Fatalf("Can't connect to the proxy: %v", err) + } + defer client.Close() + // Make sure the proxy doesn't stop when there is no actual backend: + client.Write(testBuf) + client.Write(testBuf) + backend := NewEchoServer(t, "udp", "127.0.0.1:25587") + defer backend.Close() + backend.Run() + client.SetDeadline(time.Now().Add(10 * time.Second)) + if _, err = client.Write(testBuf); err != nil { + t.Fatal(err) + } + recvBuf := make([]byte, testBufSize) + if _, err = client.Read(recvBuf); err != nil { + t.Fatal(err) + } + if !bytes.Equal(testBuf, recvBuf) { + t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) + } +} diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go new file mode 100644 index 00000000..7a711f65 --- /dev/null +++ b/pkg/proxy/proxy.go @@ -0,0 +1,29 @@ +package proxy + +import ( + "fmt" + "net" +) + +type Proxy interface { + // Start forwarding traffic back and forth the front and back-end + // addresses. + Run() + // Stop forwarding traffic and close both ends of the Proxy. + Close() + // Return the address on which the proxy is listening. + FrontendAddr() net.Addr + // Return the proxied address. 
+ BackendAddr() net.Addr +} + +func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { + switch frontendAddr.(type) { + case *net.UDPAddr: + return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr)) + case *net.TCPAddr: + return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr)) + default: + panic(fmt.Errorf("Unsupported protocol")) + } +} diff --git a/pkg/proxy/stub_proxy.go b/pkg/proxy/stub_proxy.go new file mode 100644 index 00000000..76844270 --- /dev/null +++ b/pkg/proxy/stub_proxy.go @@ -0,0 +1,22 @@ +package proxy + +import ( + "net" +) + +type StubProxy struct { + frontendAddr net.Addr + backendAddr net.Addr +} + +func (p *StubProxy) Run() {} +func (p *StubProxy) Close() {} +func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } +func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } + +func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { + return &StubProxy{ + frontendAddr: frontendAddr, + backendAddr: backendAddr, + }, nil +} diff --git a/pkg/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go new file mode 100644 index 00000000..1aa6d9fd --- /dev/null +++ b/pkg/proxy/tcp_proxy.go @@ -0,0 +1,89 @@ +package proxy + +import ( + "io" + "log" + "net" + "syscall" +) + +type TCPProxy struct { + listener *net.TCPListener + frontendAddr *net.TCPAddr + backendAddr *net.TCPAddr +} + +func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) { + listener, err := net.ListenTCP("tcp", frontendAddr) + if err != nil { + return nil, err + } + // If the port in frontendAddr was 0 then ListenTCP will have picked + // a port to listen on, hence the call to Addr to get that actual port: + return &TCPProxy{ + listener: listener, + frontendAddr: listener.Addr().(*net.TCPAddr), + backendAddr: backendAddr, + }, nil +} + +func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { + backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) + if err != nil { + log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) + client.Close() + return + } + + event := make(chan int64) + var broker = func(to, from *net.TCPConn) { + written, err := io.Copy(to, from) + if err != nil { + // If the socket we are writing to is shutdown with + // SHUT_WR, forward it to the other end of the pipe: + if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE { + from.CloseWrite() + } + } + to.CloseRead() + event <- written + } + + go broker(client, backend) + go broker(backend, client) + + var transferred int64 = 0 + for i := 0; i < 2; i++ { + select { + case written := <-event: + transferred += written + case <-quit: + // Interrupt the two brokers and "join" them. 
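+ // Closing both connections forces the io.Copy in each broker + // to return; draining the event channel below joins both + // broker goroutines before we exit.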
+ client.Close() + backend.Close() + for ; i < 2; i++ { + transferred += <-event + } + return + } + } + client.Close() + backend.Close() +} + +func (proxy *TCPProxy) Run() { + quit := make(chan bool) + defer close(quit) + for { + client, err := proxy.listener.Accept() + if err != nil { + log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) + return + } + go proxy.clientLoop(client.(*net.TCPConn), quit) + } +} + +func (proxy *TCPProxy) Close() { proxy.listener.Close() } +func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } +func (proxy *TCPProxy) BackendAddr() net.Addr { return proxy.backendAddr } diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go new file mode 100644 index 00000000..ae6a7bbc --- /dev/null +++ b/pkg/proxy/udp_proxy.go @@ -0,0 +1,157 @@ +package proxy + +import ( + "encoding/binary" + "log" + "net" + "strings" + "sync" + "syscall" + "time" +) + +const ( + UDPConnTrackTimeout = 90 * time.Second + UDPBufSize = 2048 +) + +// A net.Addr where the IP is split into two fields so you can use it as a key +// in a map: +type connTrackKey struct { + IPHigh uint64 + IPLow uint64 + Port int +} + +func newConnTrackKey(addr *net.UDPAddr) *connTrackKey { + if len(addr.IP) == net.IPv4len { + return &connTrackKey{ + IPHigh: 0, + IPLow: uint64(binary.BigEndian.Uint32(addr.IP)), + Port: addr.Port, + } + } + return &connTrackKey{ + IPHigh: binary.BigEndian.Uint64(addr.IP[:8]), + IPLow: binary.BigEndian.Uint64(addr.IP[8:]), + Port: addr.Port, + } +} + +type connTrackMap map[connTrackKey]*net.UDPConn + +type UDPProxy struct { + listener *net.UDPConn + frontendAddr *net.UDPAddr + backendAddr *net.UDPAddr + connTrackTable connTrackMap + connTrackLock sync.Mutex +} + +func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr) (*UDPProxy, error) { + listener, err := net.ListenUDP("udp", frontendAddr) + if err != nil { + return nil, err + } + return &UDPProxy{ + listener: listener, + frontendAddr: listener.LocalAddr().(*net.UDPAddr), + backendAddr: backendAddr, + connTrackTable: make(connTrackMap), + }, nil +} + +func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) { + defer func() { + proxy.connTrackLock.Lock() + delete(proxy.connTrackTable, *clientKey) + proxy.connTrackLock.Unlock() + proxyConn.Close() + }() + + readBuf := make([]byte, UDPBufSize) + for { + proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout)) + again: + read, err := proxyConn.Read(readBuf) + if err != nil { + if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED { + // This will happen if the last write failed + // (e.g: nothing is actually listening on the + // proxied port on the container), ignore it + // and continue until UDPConnTrackTimeout + // expires: + goto again + } + return + } + for i := 0; i != read; { + written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr) + if err != nil { + return + } + i += written + } + } +} + +func (proxy *UDPProxy) Run() { + readBuf := make([]byte, UDPBufSize) + for { + read, from, err := proxy.listener.ReadFromUDP(readBuf) + if err != nil { + // NOTE: Apparently ReadFrom doesn't return + // ECONNREFUSED like Read does (see comment in + // UDPProxy.replyLoop) + if !isClosedError(err) { + log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) + } + break + } + + fromKey := newConnTrackKey(from) + proxy.connTrackLock.Lock() + proxyConn, hit := proxy.connTrackTable[*fromKey] + if 
!hit { + proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) + if err != nil { + log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) + proxy.connTrackLock.Unlock() + continue + } + proxy.connTrackTable[*fromKey] = proxyConn + go proxy.replyLoop(proxyConn, from, fromKey) + } + proxy.connTrackLock.Unlock() + for i := 0; i != read; { + written, err := proxyConn.Write(readBuf[i:read]) + if err != nil { + log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) + break + } + i += written + } + } +} + +func (proxy *UDPProxy) Close() { + proxy.listener.Close() + proxy.connTrackLock.Lock() + defer proxy.connTrackLock.Unlock() + for _, conn := range proxy.connTrackTable { + conn.Close() + } +} + +func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } +func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } + +func isClosedError(err error) bool { + /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. + * See: + * http://golang.org/src/pkg/net/net.go + * https://code.google.com/p/go/issues/detail?id=4337 + * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ + */ + return strings.HasSuffix(err.Error(), "use of closed network connection") +} diff --git a/pkg/reexec/MAINTAINERS b/pkg/reexec/MAINTAINERS new file mode 100644 index 00000000..e48a0c7d --- /dev/null +++ b/pkg/reexec/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/pkg/reexec/README.md b/pkg/reexec/README.md new file mode 100644 index 00000000..45592ce8 --- /dev/null +++ b/pkg/reexec/README.md @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered under a name, and argv[0] of +the re-exec'd binary will be used to find and execute custom init paths. diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go new file mode 100644 index 00000000..8dc3f3a4 --- /dev/null +++ b/pkg/reexec/command_linux.go @@ -0,0 +1,18 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go new file mode 100644 index 00000000..a579318e --- /dev/null +++ b/pkg/reexec/command_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package reexec + +import ( + "os/exec" +) + +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/pkg/reexec/reexec.go b/pkg/reexec/reexec.go new file mode 100644 index 00000000..774e71c7 --- /dev/null +++ b/pkg/reexec/reexec.go @@ -0,0 +1,42 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. 
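+// A sketch of the expected wiring (the "my-init" name is hypothetical): +// +// func init() { Register("my-init", myInit) } +// +// func main() { +// if Init() { +// return // this process was re-exec'd as an initializer +// } +// // normal startup... +// }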
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +// Self returns the path to the current process's binary +func Self() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + name = lp + } + } + return name +} diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go new file mode 100644 index 00000000..63337542 --- /dev/null +++ b/pkg/signal/signal.go @@ -0,0 +1,19 @@ +package signal + +import ( + "os" + "os/signal" +) + +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go new file mode 100644 index 00000000..fcd3a8f2 --- /dev/null +++ b/pkg/signal/signal_darwin.go @@ -0,0 +1,40 @@ +package signal + +import ( + "syscall" +) + +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go new file mode 100644 index 00000000..102e9184 --- /dev/null +++ b/pkg/signal/signal_freebsd.go @@ -0,0 +1,42 @@ +package signal + +import ( + "syscall" +) + +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "THR": syscall.SIGTHR, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go new file mode 100644 index 00000000..a62f79d4 --- /dev/null +++ b/pkg/signal/signal_linux.go @@ -0,0 +1,43 @@ +package signal + +import ( + "syscall" +) + +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CLD": syscall.SIGCLD, + 
"CONT": syscall.SIGCONT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "POLL": syscall.SIGPOLL, + "PROF": syscall.SIGPROF, + "PWR": syscall.SIGPWR, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STKFLT": syscall.SIGSTKFLT, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "UNUSED": syscall.SIGUNUSED, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000..99f94659 --- /dev/null +++ b/pkg/signal/signal_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux,!darwin,!freebsd + +package signal + +import ( + "syscall" +) + +var SignalMap = map[string]syscall.Signal{} diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go new file mode 100644 index 00000000..cbdfd1ff --- /dev/null +++ b/pkg/signal/trap.go @@ -0,0 +1,54 @@ +package signal + +import ( + "log" + "os" + gosignal "os/signal" + "sync/atomic" + "syscall" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is +// skipped and the process terminated directly. +// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup. +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + signals := []os.Signal{os.Interrupt, syscall.SIGTERM} + if os.Getenv("DEBUG") == "" { + signals = append(signals, syscall.SIGQUIT) + } + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + go func(sig os.Signal) { + log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + // If the user really wants to interrupt, let him do so. 
+ if atomic.LoadUint32(&interruptCount) < 3 {
+ atomic.AddUint32(&interruptCount, 1)
+ // Initiate the cleanup only once
+ if atomic.LoadUint32(&interruptCount) == 1 {
+ // Call cleanup handler
+ cleanup()
+ os.Exit(0)
+ } else {
+ return
+ }
+ } else {
+ log.Printf("Force shutdown of docker, interrupting cleanup\n")
+ }
+ case syscall.SIGQUIT:
+ }
+ os.Exit(128 + int(sig.(syscall.Signal)))
+ }(sig)
+ }
+ }()
+}
diff --git a/pkg/stdcopy/MAINTAINERS b/pkg/stdcopy/MAINTAINERS
new file mode 100644
index 00000000..6dde4769
--- /dev/null
+++ b/pkg/stdcopy/MAINTAINERS
@@ -0,0 +1 @@
+Cristian Staretu (@unclejack)
diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go
new file mode 100644
index 00000000..79e15bc8
--- /dev/null
+++ b/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,172 @@
+package stdcopy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/docker/docker/pkg/log"
+)
+
+const (
+ StdWriterPrefixLen = 8
+ StdWriterFdIndex = 0
+ StdWriterSizeIndex = 4
+)
+
+type StdType [StdWriterPrefixLen]byte
+
+var (
+ Stdin StdType = StdType{0: 0}
+ Stdout StdType = StdType{0: 1}
+ Stderr StdType = StdType{0: 2}
+)
+
+type StdWriter struct {
+ io.Writer
+ prefix StdType
+ sizeBuf []byte
+}
+
+func (w *StdWriter) Write(buf []byte) (n int, err error) {
+ var n1, n2 int
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
+ n1, err = w.Writer.Write(w.prefix[:])
+ if err != nil {
+ n = n1 - StdWriterPrefixLen
+ } else {
+ n2, err = w.Writer.Write(buf)
+ n = n1 + n2 - StdWriterPrefixLen
+ }
+ if n < 0 {
+ n = 0
+ }
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be Stdin, Stdout or Stderr, as defined above in this package.
+func NewStdWriter(w io.Writer, t StdType) *StdWriter {
+ if len(t) != StdWriterPrefixLen {
+ return nil
+ }
+
+ return &StdWriter{
+ Writer: w,
+ prefix: t,
+ sizeBuf: make([]byte, 4),
+ }
+}
+
+var ErrInvalidStdHeader = errors.New("Unrecognized input header")
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
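+//
+// For reference, the 8-byte header that StdWriter lays down in front of every
+// frame (a summary of the implementation above, not an additional format):
+//
+//	byte 0     stream id: 0 (stdin), 1 (stdout) or 2 (stderr)
+//	bytes 1-3  unused, always zero
+//	bytes 4-7  big-endian uint32 length of the payload that follows
+//
+// Demultiplexing a muxed stream into two buffers can then look like this,
+// `src` being any io.Reader that carries StdWriter output:
+//
+//	var stdout, stderr bytes.Buffer
+//	if _, err := stdcopy.StdCopy(&stdout, &stderr, src); err != nil {
+//		log.Fatal(err)
+//	}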
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, 32*1024+StdWriterPrefixLen+1) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < StdWriterPrefixLen { + log.Debugf("Corrupted prefix: %v", buf[:nr]) + return written, nil + } + break + } + if er != nil { + log.Debugf("Error reading header: %s", er) + return 0, er + } + } + + // Check the first byte to know where to write + switch buf[StdWriterFdIndex] { + case 0: + fallthrough + case 1: + // Write on stdout + out = dstout + case 2: + // Write on stderr + out = dsterr + default: + log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) + return 0, ErrInvalidStdHeader + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + log.Debugf("framesize: %d", frameSize) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+StdWriterPrefixLen > bufLen { + log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+StdWriterPrefixLen { + log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr]) + return written, nil + } + break + } + if er != nil { + log.Debugf("Error reading frame: %s", er) + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) + if ew != nil { + log.Debugf("Error writing frame: %s", ew) + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + log.Debugf("Error Short Write: (%d on %d)", nw, frameSize) + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+StdWriterPrefixLen:]) + // Move the index + nr -= frameSize + StdWriterPrefixLen + } +} diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 00000000..14e6ed31 --- /dev/null +++ b/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,20 @@ +package stdcopy + +import ( + "bytes" + "io/ioutil" + "testing" +) + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000..ebcfbcc7 --- /dev/null +++ b/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/symlink/MAINTAINERS b/pkg/symlink/MAINTAINERS new file mode 100644 index 00000000..51a41a5b --- /dev/null +++ b/pkg/symlink/MAINTAINERS @@ -0,0 +1,3 @@ +Tibor Vass (@tiborvass) +Cristian Staretu (@unclejack) +Tianon Gravi (@tianon) diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md new file mode 100644 index 00000000..0d1dbb70 --- /dev/null +++ b/pkg/symlink/README.md @@ -0,0 +1,5 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go new file mode 100644 index 00000000..b4bdff24 --- /dev/null +++ b/pkg/symlink/fs.go @@ -0,0 +1,131 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + root, err = filepath.Abs(root) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. 
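+// Instead of failing, the escaping portion of the path is re-rooted under
+// `root`, as the example below shows.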
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+ root = filepath.Clean(root)
+ if path == root {
+ return path, nil
+ }
+ if !strings.HasPrefix(path, root) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ const maxIter = 255
+ originalPath := path
+ // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+ path = path[len(root):]
+ if root == string(filepath.Separator) {
+ path = string(filepath.Separator) + path
+ }
+ if !strings.HasPrefix(path, string(filepath.Separator)) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ path = filepath.Clean(path)
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ // b here will always be considered to be the "current absolute path inside
+ // root" as we append paths to it; we also append a slash and use
+ // filepath.Clean after the loop to trim the trailing slash
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+ }
+
+ // find next path component, p
+ i := strings.IndexRune(path, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ continue
+ }
+
+ // this takes a b.String() like "b/../" and a p like "c" and turns it
+ // into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+ // root gets prepended and we Clean again (to remove any trailing slash
+ // if the first Clean gave us just "/")
+ cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+ if cleanP == string(filepath.Separator) {
+ // never Lstat "/" itself
+ b.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ fi, err := os.Lstat(fullP)
+ if os.IsNotExist(err) {
+ // if p does not exist, accept it
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p + string(filepath.Separator))
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ if filepath.IsAbs(dest) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+
+ // see note above on "fullP := ..."
for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go new file mode 100644 index 00000000..6b2496c4 --- /dev/null +++ b/pkg/symlink/fs_test.go @@ -0,0 +1,402 @@ +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + 
}
+ if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink e lead us to ../b
+ // normalize to "testdata/fs/a"
+ if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink f lead us out of the "testdata" scope
+ // we don't normalize because symlink f is in scope and there is no
+ // information leak
+ if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink f lead us out of the "testdata/fs" scope
+ // we don't normalize because symlink f is in scope and there is no
+ // information leak
+ if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // avoid letting symlink g (pointed at by symlink h) take us out of scope
+ // TODO: we should probably normalize to scope here because ../[....]/root
+ // is out of scope and we leak information
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "testdata/fs/b/h", target: "../g"},
+ {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkBreakoutPath(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // avoid letting symlink -> ../directory/file escape from scope
+ // normalize to "testdata/fs/j"
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkToRoot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // make sure we don't allow escaping to /
+ // normalize to dir
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkSlashDotdot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ tmpdir = filepath.Join(tmpdir, "dir", "subdir")
+
+ // make sure we don't allow escaping to /
+ // normalize to dir
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkDotdot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ tmpdir = filepath.Join(tmpdir, "dir", "subdir")
+
+ // make sure we stay in scope without leaking information
+ // this also checks for escaping to /
+ // normalize to dir
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRelativePath2(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkScopeLink(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root2"},
+ {path: "root", target: "root2"},
+ {path: "root2/foo", target: "../bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRootScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ expected, err := filepath.EvalSymlinks(tmpdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rewrite, err := FollowSymlinkInScope(tmpdir, "/")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rewrite != expected {
+ t.Fatalf("expected %q got %q", expected, rewrite)
+ }
+}
+
+func TestFollowSymlinkEmpty(t *testing.T) {
+ res, err := FollowSymlinkInScope("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res != wd {
+ t.Fatalf("expected %q got %q", wd, res)
+ }
+}
+
+func TestFollowSymlinkCircular(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+ t.Fatal("expected an error for foo -> foo")
+ }
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/bar", target: "baz"},
+ {path: "root/baz", target: "../bak"},
+ {path: "root/bak", target: "/bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/bar", "", "root"); err == nil {
+ t.Fatal("expected an error for bar -> baz -> bak -> bar")
+ }
+}
+
+func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root2"},
+ {path: "root", target: "root2"},
+ {path: "root/a", target: "r/s"},
+ {path: "root/r", target: "../root/t"},
+ {path: "root/root/t/s/b", target: "/../u"},
+ {path: "root/u/c", target: "."},
+ {path: "root/u/x/y", target: "../v"},
+ {path: "root/u/v", target: "/../w"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkBreakoutNonExistent(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("",
"TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/sysinfo/MAINTAINERS b/pkg/sysinfo/MAINTAINERS new file mode 100644 index 00000000..68a97d2f --- /dev/null +++ b/pkg/sysinfo/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go new file mode 100644 index 00000000..0c28719f --- /dev/null +++ b/pkg/sysinfo/sysinfo.go @@ -0,0 +1,47 @@ +package sysinfo + +import ( + "io/ioutil" + "log" + "os" + "path" + + "github.com/docker/libcontainer/cgroups" +) + +type SysInfo struct { + MemoryLimit bool + SwapLimit bool + IPv4ForwardingDisabled bool + AppArmor bool +} + +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil { + if !quiet { + log.Printf("WARNING: %s\n", err) + } + } else { + _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) + _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) + sysInfo.MemoryLimit = err1 == nil && err2 == nil + if !sysInfo.MemoryLimit && !quiet { + log.Printf("WARNING: Your kernel does not support cgroup memory limit.") + } + + _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) + sysInfo.SwapLimit = err == nil + if !sysInfo.SwapLimit && !quiet { + log.Printf("WARNING: Your kernel does not support cgroup swap limit.") + } + } + + // Check if AppArmor seems to be enabled on this system. 
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) { + sysInfo.AppArmor = false + } else { + sysInfo.AppArmor = true + } + return sysInfo +} diff --git a/pkg/system/MAINTAINERS b/pkg/system/MAINTAINERS new file mode 100644 index 00000000..68a97d2f --- /dev/null +++ b/pkg/system/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/pkg/system/errors.go b/pkg/system/errors.go new file mode 100644 index 00000000..63045186 --- /dev/null +++ b/pkg/system/errors.go @@ -0,0 +1,9 @@ +package system + +import ( + "errors" +) + +var ( + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go new file mode 100644 index 00000000..e7022003 --- /dev/null +++ b/pkg/system/stat_linux.go @@ -0,0 +1,13 @@ +package system + +import ( + "syscall" +) + +func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { + return stat.Atim +} + +func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { + return stat.Mtim +} diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go new file mode 100644 index 00000000..4686a4c3 --- /dev/null +++ b/pkg/system/stat_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +import "syscall" + +func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { + return stat.Atimespec +} + +func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { + return stat.Mtimespec +} diff --git a/pkg/system/utimes_darwin.go b/pkg/system/utimes_darwin.go new file mode 100644 index 00000000..4c6002fe --- /dev/null +++ b/pkg/system/utimes_darwin.go @@ -0,0 +1,11 @@ +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/pkg/system/utimes_freebsd.go b/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000..ceaa044c --- /dev/null +++ b/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/pkg/system/utimes_linux.go b/pkg/system/utimes_linux.go new file mode 100644 index 00000000..8f902982 --- /dev/null +++ b/pkg/system/utimes_linux.go @@ -0,0 +1,28 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + AT_FDCWD := -100 + AT_SYMLINK_NOFOLLOW := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff --git a/pkg/system/utimes_test.go 
b/pkg/system/utimes_test.go
new file mode 100644
index 00000000..38e4020c
--- /dev/null
+++ b/pkg/system/utimes_test.go
@@ -0,0 +1,64 @@
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+)
+
+func prepareFiles(t *testing.T) (string, string, string) {
+ dir, err := ioutil.TempDir("", "docker-system-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ file := filepath.Join(dir, "exist")
+ if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ invalid := filepath.Join(dir, "doesnt-exist")
+
+ symlink := filepath.Join(dir, "symlink")
+ if err := os.Symlink(file, symlink); err != nil {
+ t.Fatal(err)
+ }
+
+ return file, invalid, symlink
+}
+
+func TestLUtimesNano(t *testing.T) {
+ file, invalid, symlink := prepareFiles(t)
+
+ before, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []syscall.Timespec{{0, 0}, {0, 0}}
+ if err := LUtimesNano(symlink, ts); err != nil {
+ t.Fatal(err)
+ }
+
+ symlinkInfo, err := os.Lstat(symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the symlink should be different")
+ }
+
+ fileInfo, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the file should be the same")
+ }
+
+ if err := LUtimesNano(invalid, ts); err == nil {
+ t.Fatal("Doesn't return an error on a non-existing file")
+ }
+}
diff --git a/pkg/system/utimes_unsupported.go b/pkg/system/utimes_unsupported.go
new file mode 100644
index 00000000..adf2734f
--- /dev/null
+++ b/pkg/system/utimes_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/pkg/system/xattrs_linux.go b/pkg/system/xattrs_linux.go
new file mode 100644
index 00000000..00edb201
--- /dev/null
+++ b/pkg/system/xattrs_linux.go
@@ -0,0 +1,59 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Returns a nil slice and nil error if the xattr is not set
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+ if errno == syscall.ERANGE {
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
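+ // an empty value still needs a valid, non-nil pointer for the syscall,
+ // so point at a shared zero byte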
dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/pkg/system/xattrs_unsupported.go b/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000..0060c167 --- /dev/null +++ b/pkg/system/xattrs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package system + +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/pkg/systemd/MAINTAINERS b/pkg/systemd/MAINTAINERS new file mode 100644 index 00000000..51228b36 --- /dev/null +++ b/pkg/systemd/MAINTAINERS @@ -0,0 +1 @@ +Brandon Philips (@philips) diff --git a/pkg/systemd/booted.go b/pkg/systemd/booted.go new file mode 100644 index 00000000..2aae931e --- /dev/null +++ b/pkg/systemd/booted.go @@ -0,0 +1,15 @@ +package systemd + +import ( + "os" +) + +// Conversion to Go of systemd's sd_booted() +func SdBooted() bool { + s, err := os.Stat("/run/systemd/system") + if err != nil { + return false + } + + return s.IsDir() +} diff --git a/pkg/systemd/listendfd.go b/pkg/systemd/listendfd.go new file mode 100644 index 00000000..0fbc0a6a --- /dev/null +++ b/pkg/systemd/listendfd.go @@ -0,0 +1,40 @@ +package systemd + +import ( + "errors" + "net" + "strconv" + + "github.com/coreos/go-systemd/activation" +) + +// ListenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func ListenFD(addr string) ([]net.Listener, error) { + // socket activation + listeners, err := activation.Listeners(false) + if err != nil { + return nil, err + } + + if listeners == nil || len(listeners) == 0 { + return nil, errors.New("No sockets found") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" { + addr = "*" + } + + fdNum, _ := strconv.Atoi(addr) + fdOffset := fdNum - 3 + if (addr != "*") && (len(listeners) < int(fdOffset)+1) { + return nil, errors.New("Too few socket activated files passed in") + } + + if addr == "*" { + return listeners, nil + } + + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/pkg/systemd/sd_notify.go b/pkg/systemd/sd_notify.go new file mode 100644 index 00000000..1993cab9 --- /dev/null +++ b/pkg/systemd/sd_notify.go @@ -0,0 +1,33 @@ +package systemd + +import ( + "errors" + "net" + "os" +) + +var SdNotifyNoSocket = errors.New("No socket") + +// Send a message to the init daemon. It is common to ignore the error. 
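+//
+// For example, a daemon announcing that it has finished starting up might do
+// the following ("READY=1" is part of systemd's sd_notify protocol, not of
+// this package):
+//
+//	if err := systemd.SdNotify("READY=1"); err != nil && err != systemd.SdNotifyNoSocket {
+//		log.Printf("failed to notify systemd: %v", err)
+//	}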
+func SdNotify(state string) error { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + if socketAddr.Name == "" { + return SdNotifyNoSocket + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + if err != nil { + return err + } + + _, err = conn.Write([]byte(state)) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/tailfile/tailfile.go b/pkg/tailfile/tailfile.go new file mode 100644 index 00000000..2ffd36d2 --- /dev/null +++ b/pkg/tailfile/tailfile.go @@ -0,0 +1,61 @@ +package tailfile + +import ( + "bytes" + "errors" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") +var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive") + +//TailFile returns last n lines of file f +func TailFile(f *os.File, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(step, os.SEEK_END); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/pkg/tailfile/tailfile_test.go b/pkg/tailfile/tailfile_test.go new file mode 100644 index 00000000..31217c03 --- /dev/null +++ b/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + 
+ t.Fatal(err)
+ }
+ for i, l := range res {
+ t.Logf("%s", l)
+ if expected[i] != string(l) {
+ t.Fatalf("Expected line %s, got %s", expected[i], l)
+ }
+ }
+}
+
+func TestTailEmptyFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ res, err := TailFile(f, 10000)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(res) != 0 {
+ t.Fatal("Must be empty slice from empty file")
+ }
+}
+
+func TestTailNegativeN(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber {
+ t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err)
+ }
+ if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber {
+ t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err)
+ }
+}
+
+func BenchmarkTail(b *testing.B) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ for i := 0; i < 10000; i++ {
+ if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := TailFile(f, 1000); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/pkg/tarsum/MAINTAINER b/pkg/tarsum/MAINTAINER
new file mode 100644
index 00000000..bd492e83
--- /dev/null
+++ b/pkg/tarsum/MAINTAINER
@@ -0,0 +1 @@
+Eric Windisch (@ewindisch)
diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go
new file mode 100644
index 00000000..f9f46809
--- /dev/null
+++ b/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,125 @@
+package tarsum
+
+import "sort"
+
+// This info will be accessed through an interface so the actual name and sum cannot be meddled with
+type FileInfoSumInterface interface {
+ // File name
+ Name() string
+ // Checksum of this particular file and its headers
+ Sum() string
+ // Position of file in the tar
+ Pos() int64
+}
+
+type fileInfoSum struct {
+ name string
+ sum string
+ pos int64
+}
+
+func (fis fileInfoSum) Name() string {
+ return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+ return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+ return fis.pos
+}
+
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+ for i := range fis {
+ if fis[i].Name() == name {
+ return fis[i]
+ }
+ }
+ return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+ f := FileInfoSums{}
+ for i := range fis {
+ if fis[i].Name() == name {
+ f = append(f, fis[i])
+ }
+ }
+ return f
+}
+
+func contains(s []string, e string) bool {
+ for _, a := range s {
+ if a == e {
+ return true
+ }
+ }
+ return false
+}
+
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+ seen := make(map[string]int, len(fis)) // allocate early. no need to grow this map.
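+ // remember the first occurrence of every name; any later occurrence of
+ // the same name is reported as a duplicate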
+ for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +func (fis FileInfoSums) Len() int { return len(fis) } +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/pkg/tarsum/fileinfosums_test.go b/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 00000000..e1c6cc12 --- /dev/null +++ b/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,45 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. 
+	}
+
+	fis := newFileInfoSums()
+	expected := "0abcdef1234567890"
+	fis.SortBySums()
+	got := fis[0].Sum()
+	if got != expected {
+		t.Errorf("Expected %q, got %q", expected, got)
+	}
+
+	fis = newFileInfoSums()
+	expected = "dup1"
+	fis.SortByNames()
+	gotFis := fis[0]
+	if gotFis.Name() != expected {
+		t.Errorf("Expected %q, got %q", expected, gotFis.Name())
+	}
+	// since a duplicate is first, ensure it is ordered first by position too
+	if gotFis.Pos() != 4 {
+		t.Errorf("Expected %d, got %d", 4, gotFis.Pos())
+	}
+}
diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go
new file mode 100644
index 00000000..6581f3f2
--- /dev/null
+++ b/pkg/tarsum/tarsum.go
@@ -0,0 +1,285 @@
+package tarsum
+
+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"hash"
+	"io"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/pkg/log"
+)
+
+const (
+	buf8K  = 8 * 1024
+	buf16K = 16 * 1024
+	buf32K = 32 * 1024
+)
+
+// NewTarSum creates a new interface for calculating a fixed time checksum of a
+// tar archive.
+//
+// This is used for calculating checksums of layers of an image, in some cases
+// including the byte payload of the image's json metadata as well, and for
+// calculating the checksums for buildcache.
+func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
+	if _, ok := tarSumVersions[v]; !ok {
+		return nil, ErrVersionNotImplemented
+	}
+	return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v}, nil
+}
+
+// NewTarSumHash creates a new TarSum, providing a THash to use rather than the DefaultTHash
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+	if _, ok := tarSumVersions[v]; !ok {
+		return nil, ErrVersionNotImplemented
+	}
+	return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, tHash: tHash}, nil
+}
+
+// TarSum is the generic interface for calculating fixed time
+// checksums of a tar archive
+type TarSum interface {
+	io.Reader
+	GetSums() FileInfoSums
+	Sum([]byte) string
+	Version() Version
+	Hash() THash
+}
+
+// tarSum struct is the structure for a Version0 checksum calculation
+type tarSum struct {
+	io.Reader
+	tarR               *tar.Reader
+	tarW               *tar.Writer
+	writer             writeCloseFlusher
+	bufTar             *bytes.Buffer
+	bufWriter          *bytes.Buffer
+	bufData            []byte
+	h                  hash.Hash
+	tHash              THash
+	sums               FileInfoSums
+	fileCounter        int64
+	currentFile        string
+	finished           bool
+	first              bool
+	DisableCompression bool // false by default. When false, the output is gzip compressed.
+ tarSumVersion Version // this field is not exported so it can not be mutated during use +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// A hash.Hash type generator and its name +type THash interface { + Hash() hash.Hash + Name() string +} + +// Convenience method for creating a THash +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts tarSum) selectHeaders(h *tar.Header, v Version) (set [][2]string) { + for _, elem := range [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } { + if v >= VersionDev && elem[0] == "mtime" { + continue + } + set = append(set, elem) + } + return +} + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.selectHeaders(h, ts.Version()) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + + // include the additional pax headers, from an ordered list + if ts.Version() >= VersionDev { + var keys []string + for k := range h.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + if _, err := ts.h.Write([]byte(k + h.Xattrs[k])); err != nil { + return err + } + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.writer == nil { + if err := ts.initTarSum(); err != nil { + return 0, err + } + } + + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + 
return 0, err
+					}
+					ts.finished = true
+					return n, nil
+				}
+				return n, err
+			}
+			ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/")
+			if err := ts.encodeHeader(currentHeader); err != nil {
+				return 0, err
+			}
+			if err := ts.tarW.WriteHeader(currentHeader); err != nil {
+				return 0, err
+			}
+			if _, err := ts.tarW.Write(buf2[:n]); err != nil {
+				return 0, err
+			}
+			ts.tarW.Flush()
+			if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
+				return 0, err
+			}
+			ts.writer.Flush()
+
+			return ts.bufWriter.Read(buf)
+		}
+		return n, err
+	}
+
+	// Filling the hash buffer
+	if _, err = ts.h.Write(buf2[:n]); err != nil {
+		return 0, err
+	}
+
+	// Filling the tar writer
+	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
+		return 0, err
+	}
+	ts.tarW.Flush()
+
+	// Filling the output writer
+	if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
+		return 0, err
+	}
+	ts.writer.Flush()
+
+	return ts.bufWriter.Read(buf)
+}
+
+func (ts *tarSum) Sum(extra []byte) string {
+	ts.sums.SortBySums()
+	h := ts.tHash.Hash()
+	if extra != nil {
+		h.Write(extra)
+	}
+	for _, fis := range ts.sums {
+		log.Debugf("-->%s<--", fis.Sum())
+		h.Write([]byte(fis.Sum()))
+	}
+	checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
+	log.Debugf("checksum processed: %s", checksum)
+	return checksum
+}
+
+func (ts *tarSum) GetSums() FileInfoSums {
+	return ts.sums
+}
diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go
new file mode 100644
index 00000000..1e06cda1
--- /dev/null
+++ b/pkg/tarsum/tarsum_test.go
@@ -0,0 +1,408 @@
+package tarsum
+
+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/md5"
+	"crypto/rand"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+type testLayer struct {
+	filename string
+	options  *sizedOptions
+	jsonfile string
+	gzip     bool
+	tarsum   string
+	version  Version
+	hash     THash
+}
+
+var testLayers = []testLayer{
+	{
+		filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+		jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+		version:  Version0,
+		tarsum:   "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"},
+	{
+		filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+		jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+		version:  VersionDev,
+		tarsum:   "tarsum.dev+sha256:486b86e25c4db4551228154848bc4663b15dd95784b1588980f4ba1cb42e83e9"},
+	{
+		filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+		jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+		gzip:     true,
+		tarsum:   "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"},
+	{
+		// Tests existing version of TarSum when xattrs are present
+		filename: "testdata/xattr/layer.tar",
+		jsonfile: "testdata/xattr/json",
+		version:  Version0,
+		tarsum:   "tarsum+sha256:e86f81a4d552f13039b1396ed03ca968ea9717581f9577ef1876ea6ff9b38c98"},
+	{
+		// Tests next version of TarSum when xattrs are present
+		filename: "testdata/xattr/layer.tar",
+		jsonfile: "testdata/xattr/json",
+		version:  VersionDev,
+		tarsum:   "tarsum.dev+sha256:6235cd3a2afb7501bac541772a3d61a3634e95bc90bb39a4676e2cb98d08390d"},
+	{
+		filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar",
+		jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json",
+		tarsum:   "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"},
+	{
+		// this tar has two files with the same path
+		filename: "testdata/collision/collision-0.tar",
+		tarsum:   "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"},
+	{
+		// this tar has the same two files (with the same path), but in reversed order, ensuring it has a different hash than the one above
+		filename: "testdata/collision/collision-1.tar",
+		tarsum:   "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"},
+	{
+		// this tar has a newer copy of collision-0.tar, ensuring it has a different hash
+		filename: "testdata/collision/collision-2.tar",
+		tarsum:   "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"},
+	{
+		// this tar has a newer copy of collision-1.tar, ensuring it has a different hash
+		filename: "testdata/collision/collision-3.tar",
+		tarsum:   "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+md5:0d7529ec7a8360155b48134b8e599f53",
+		hash:    md5THash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df",
+		hash:    sha1Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c",
+		hash:    sha224Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636",
+		hash:    sha384Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855",
+		hash:    sha512Hash,
+	},
+}
+
+type sizedOptions struct {
+	num      int64
+	size     int64
+	isRand   bool
+	realFile bool
+}
+
+// make a tar:
+// * num is the number of files the tar should have
+// * size is the bytes per file
+// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
+// * realFile will write to a TempFile, instead of an in-memory buffer
+func sizedTar(opts sizedOptions) io.Reader {
+	var (
+		fh  io.ReadWriter
+		err error
+	)
+	if opts.realFile {
+		fh, err = ioutil.TempFile("", "tarsum")
+		if err != nil {
+			return nil
+		}
+	} else {
+		fh = bytes.NewBuffer([]byte{})
+	}
+	tarW := tar.NewWriter(fh)
+	for i := int64(0); i < opts.num; i++ {
+		err := tarW.WriteHeader(&tar.Header{
+			Name: fmt.Sprintf("/testdata%d", i),
+			Mode: 0755,
+			Uid:  0,
+			Gid:  0,
+			Size: opts.size,
+		})
+		if err != nil {
+			return nil
+		}
+		var rBuf []byte
+		if opts.isRand {
+			rBuf = make([]byte, 8)
+			_, err = rand.Read(rBuf)
+			if err != nil {
+				return nil
+			}
+		} else {
+			rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+		}
+
+		for i := int64(0); i < opts.size/int64(8); i++ {
+			tarW.Write(rBuf)
+		}
+	}
+	return fh
+}
+
+func emptyTarSum(gzip bool) (TarSum,
error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. + go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! 
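+			// (!layer.gzip feeds NewTarSum's DisableCompression argument,
+			// so a gzip-compressed test layer passes false here.)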
+ ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + } +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(buf, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(buf, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 00000000..0f0ba497 --- 
/dev/null +++ b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? 
zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/pkg/tarsum/testdata/collision/collision-2.tar b/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go new file mode 100644 index 00000000..e1161fc5 --- /dev/null +++ b/pkg/tarsum/versioning.go @@ -0,0 +1,56 @@ +package tarsum + +import ( + "errors" + "strings" +) + +// versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +const ( + // Prefix of "tarsum" + Version0 Version = iota + // Prefix of "tarsum.dev" + // NOTE: this variable will be of an unsettled next-version of the TarSum calculation + VersionDev +) + +// Get a list of all known tarsum Version +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var tarSumVersions = map[Version]string{ + 0: "tarsum", + 1: "tarsum.dev", +} + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go new file mode 100644 index 00000000..b851c3be --- /dev/null +++ b/pkg/tarsum/versioning_test.go @@ -0,0 +1,49 @@ +package tarsum + +import ( + "testing" +) + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} diff --git a/pkg/tarsum/writercloser.go b/pkg/tarsum/writercloser.go new file mode 100644 index 00000000..9727ecde --- /dev/null +++ b/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher 
interface {
+	io.WriteCloser
+	Flush() error
+}
+
+type nopCloseFlusher struct {
+	io.Writer
+}
+
+func (n *nopCloseFlusher) Close() error {
+	return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+	return nil
+}
diff --git a/pkg/term/MAINTAINERS b/pkg/term/MAINTAINERS
new file mode 100644
index 00000000..aee10c84
--- /dev/null
+++ b/pkg/term/MAINTAINERS
@@ -0,0 +1 @@
+Solomon Hykes (@shykes)
diff --git a/pkg/term/term.go b/pkg/term/term.go
new file mode 100644
index 00000000..ea94b44a
--- /dev/null
+++ b/pkg/term/term.go
@@ -0,0 +1,103 @@
+package term
+
+import (
+	"errors"
+	"os"
+	"os/signal"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+type State struct {
+	termios Termios
+}
+
+type Winsize struct {
+	Height uint16
+	Width  uint16
+	x      uint16
+	y      uint16
+}
+
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return nil
+	}
+	return err
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios Termios
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&termios)))
+	return err == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+	if state == nil {
+		return ErrInvalidState
+	}
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios)))
+	if err != 0 {
+		return err
+	}
+	return nil
+}
+
+func SaveState(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+func DisableEcho(fd uintptr, state *State) error {
+	newState := state.termios
+	newState.Lflag &^= syscall.ECHO
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return err
+	}
+	handleInterrupt(fd, state)
+	return nil
+}
+
+func SetRawTerminal(fd uintptr) (*State, error) {
+	oldState, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+	handleInterrupt(fd, oldState)
+	return oldState, err
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+
+	go func() {
+		_ = <-sigchan
+		RestoreTerminal(fd, state)
+		os.Exit(0)
+	}()
+}
diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go
new file mode 100644
index 00000000..11cd70d1
--- /dev/null
+++ b/pkg/term/termios_darwin.go
@@ -0,0 +1,65 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+type Termios struct {
+	Iflag  uint64
+	Oflag  uint64
+	Cflag  uint64
+	Lflag  uint64
+	Cc     [20]byte
+	Ispeed uint64
+	Ospeed uint64
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go
new file mode 100644
index 00000000..ed365957
--- /dev/null
+++ b/pkg/term/termios_freebsd.go
@@ -0,0 +1,65 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go
new file mode 100644
index 00000000..4a717c84
--- /dev/null
+++ b/pkg/term/termios_linux.go
@@ -0,0 +1,44 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TCGETS
+	setTermios = syscall.TCSETS
+)
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
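+// On Linux this uses the TCGETS/TCSETS ioctls declared above; the flags
+// cleared below mirror what cfmakeraw(3) does.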
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
+	newState.Oflag &^= syscall.OPOST
+	newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
+	newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
+	newState.Cflag |= syscall.CS8
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+	return &oldState, nil
+}
diff --git a/pkg/testutils/MAINTAINERS b/pkg/testutils/MAINTAINERS
new file mode 100644
index 00000000..f2e8c52e
--- /dev/null
+++ b/pkg/testutils/MAINTAINERS
@@ -0,0 +1,2 @@
+Solomon Hykes (@shykes)
+Cristian Staretu (@unclejack)
diff --git a/pkg/testutils/README.md b/pkg/testutils/README.md
new file mode 100644
index 00000000..a208a90e
--- /dev/null
+++ b/pkg/testutils/README.md
@@ -0,0 +1,2 @@
+`testutils` is a collection of utility functions to facilitate the writing
+of tests. It is used in various places by the Docker test suite.
diff --git a/pkg/testutils/utils.go b/pkg/testutils/utils.go
new file mode 100644
index 00000000..9c664ff2
--- /dev/null
+++ b/pkg/testutils/utils.go
@@ -0,0 +1,37 @@
+package testutils
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+)
+
+const chars = "abcdefghijklmnopqrstuvwxyz" +
+	"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+	"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+
+// Timeout calls f and waits for 100ms for it to complete.
+// If it doesn't, it causes the tests to fail.
+// t must be a valid testing context.
+func Timeout(t *testing.T, f func()) {
+	onTimeout := time.After(100 * time.Millisecond)
+	onDone := make(chan bool)
+	go func() {
+		f()
+		close(onDone)
+	}()
+	select {
+	case <-onTimeout:
+		t.Fatalf("timeout")
+	case <-onDone:
+	}
+}
+
+// RandomString returns a random string of the specified length
+func RandomString(length int) string {
+	res := make([]byte, length)
+	for i := 0; i < length; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
diff --git a/pkg/timeutils/MAINTAINERS b/pkg/timeutils/MAINTAINERS
new file mode 100644
index 00000000..6dde4769
--- /dev/null
+++ b/pkg/timeutils/MAINTAINERS
@@ -0,0 +1 @@
+Cristian Staretu (@unclejack)
diff --git a/pkg/timeutils/json.go b/pkg/timeutils/json.go
new file mode 100644
index 00000000..19f107bf
--- /dev/null
+++ b/pkg/timeutils/json.go
@@ -0,0 +1,23 @@
+package timeutils
+
+import (
+	"errors"
+	"time"
+)
+
+const (
+	// Define our own version of RFC3339Nano because we want one
+	// that pads the nanoseconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	JSONFormat       = `"` + time.RFC3339Nano + `"`
+)
+
+func FastMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
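+		// (The standard library's Time.MarshalJSON applies the same guard.)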
+		return "", errors.New("Time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(JSONFormat), nil
+}
diff --git a/pkg/truncindex/MAINTAINERS b/pkg/truncindex/MAINTAINERS
new file mode 100644
index 00000000..6dde4769
--- /dev/null
+++ b/pkg/truncindex/MAINTAINERS
@@ -0,0 +1 @@
+Cristian Staretu (@unclejack)
diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go
new file mode 100644
index 00000000..89aa88d6
--- /dev/null
+++ b/pkg/truncindex/truncindex.go
@@ -0,0 +1,106 @@
+package truncindex
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/tchap/go-patricia/patricia"
+)
+
+var (
+	ErrNoID = errors.New("prefix can't be empty")
+)
+
+func init() {
+	// Change the patricia max prefix per node length,
+	// because our len(ID) is always 64
+	patricia.MaxPrefixPerNode = 64
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+	sync.RWMutex
+	trie *patricia.Trie
+	ids  map[string]struct{}
+}
+
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+	idx = &TruncIndex{
+		ids:  make(map[string]struct{}),
+		trie: patricia.NewTrie(),
+	}
+	for _, id := range ids {
+		idx.addId(id)
+	}
+	return
+}
+
+func (idx *TruncIndex) addId(id string) error {
+	if strings.Contains(id, " ") {
+		return fmt.Errorf("Illegal character: ' '")
+	}
+	if id == "" {
+		return ErrNoID
+	}
+	if _, exists := idx.ids[id]; exists {
+		return fmt.Errorf("Id already exists: '%s'", id)
+	}
+	idx.ids[id] = struct{}{}
+	if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+		return fmt.Errorf("Failed to insert id: %s", id)
+	}
+	return nil
+}
+
+func (idx *TruncIndex) Add(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if err := idx.addId(id); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (idx *TruncIndex) Delete(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if _, exists := idx.ids[id]; !exists || id == "" {
+		return fmt.Errorf("No such id: '%s'", id)
+	}
+	delete(idx.ids, id)
+	if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+		return fmt.Errorf("No such id: '%s'", id)
+	}
+	return nil
+}
+
+func (idx *TruncIndex) Get(s string) (string, error) {
+	idx.RLock()
+	defer idx.RUnlock()
+	var (
+		id string
+	)
+	if s == "" {
+		return "", ErrNoID
+	}
+	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+		if id != "" {
+			// the prefix is ambiguous if two or more IDs match it
+			id = ""
+			return fmt.Errorf("we've found two entries")
+		}
+		id = string(prefix)
+		return nil
+	}
+
+	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+		return "", fmt.Errorf("No such id: %s", s)
+	}
+	if id != "" {
+		return id, nil
+	}
+	return "", fmt.Errorf("No such id: %s", s)
+}
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
new file mode 100644
index 00000000..32c41c7d
--- /dev/null
+++ b/pkg/truncindex/truncindex_test.go
@@ -0,0 +1,401 @@
+package truncindex
+
+import (
+	"math/rand"
+	"testing"
+
+	"github.com/docker/docker/utils"
+)
+
+// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
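+//
+// A minimal usage sketch (reusing the ID exercised by the test below):
+//
+//	index := NewTruncIndex([]string{})
+//	_ = index.Add("99b36c2c326ccc11e726eee6ee78a0baf166ef96")
+//	full, err := index.Get("99b3") // resolves the full ID while the prefix is unambiguous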
+func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 
0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id 
:= range testSet {
+			if err := index.Delete(id); err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
+
+func BenchmarkTruncIndexNew100(b *testing.B) {
+	var testSet []string
+	for i := 0; i < 100; i++ {
+		testSet = append(testSet, utils.GenerateRandomID())
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		NewTruncIndex(testSet)
+	}
+}
+
+func BenchmarkTruncIndexNew250(b *testing.B) {
+	var testSet []string
+	for i := 0; i < 250; i++ {
+		testSet = append(testSet, utils.GenerateRandomID())
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		NewTruncIndex(testSet)
+	}
+}
+
+func BenchmarkTruncIndexNew500(b *testing.B) {
+	var testSet []string
+	for i := 0; i < 500; i++ {
+		testSet = append(testSet, utils.GenerateRandomID())
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		NewTruncIndex(testSet)
+	}
+}
+
+func BenchmarkTruncIndexAddGet100(b *testing.B) {
+	var testSet []string
+	var testKeys []string
+	for i := 0; i < 100; i++ {
+		id := utils.GenerateRandomID()
+		testSet = append(testSet, id)
+		l := rand.Intn(12) + 12
+		testKeys = append(testKeys, id[:l])
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		index := NewTruncIndex([]string{})
+		for _, id := range testSet {
+			if err := index.Add(id); err != nil {
+				b.Fatal(err)
+			}
+		}
+		for _, id := range testKeys {
+			if res, err := index.Get(id); err != nil {
+				b.Fatal(res, err)
+			}
+		}
+	}
+}
+
+func BenchmarkTruncIndexAddGet250(b *testing.B) {
+	var testSet []string
+	var testKeys []string
+	for i := 0; i < 250; i++ {
+		id := utils.GenerateRandomID()
+		testSet = append(testSet, id)
+		l := rand.Intn(12) + 12
+		testKeys = append(testKeys, id[:l])
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		index := NewTruncIndex([]string{})
+		for _, id := range testSet {
+			if err := index.Add(id); err != nil {
+				b.Fatal(err)
+			}
+		}
+		for _, id := range testKeys {
+			if res, err := index.Get(id); err != nil {
+				b.Fatal(res, err)
+			}
+		}
+	}
+}
+
+func BenchmarkTruncIndexAddGet500(b *testing.B) {
+	var testSet []string
+	var testKeys []string
+	for i := 0; i < 500; i++ {
+		id := utils.GenerateRandomID()
+		testSet = append(testSet, id)
+		l := rand.Intn(12) + 12
+		testKeys = append(testKeys, id[:l])
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		index := NewTruncIndex([]string{})
+		for _, id := range testSet {
+			if err := index.Add(id); err != nil {
+				b.Fatal(err)
+			}
+		}
+		for _, id := range testKeys {
+			if res, err := index.Get(id); err != nil {
+				b.Fatal(res, err)
+			}
+		}
+	}
+}
diff --git a/pkg/units/MAINTAINERS b/pkg/units/MAINTAINERS
new file mode 100644
index 00000000..68a97d2f
--- /dev/null
+++ b/pkg/units/MAINTAINERS
@@ -0,0 +1,2 @@
+Michael Crosby (@crosbymichael)
+Victor Vieux (@vieux)
diff --git a/pkg/units/duration.go b/pkg/units/duration.go
new file mode 100644
index 00000000..cd331214
--- /dev/null
+++ b/pkg/units/duration.go
@@ -0,0 +1,31 @@
+package units
+
+import (
+	"fmt"
+	"time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.)
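+// For example, per the thresholds below: 90*time.Second yields
+// "About a minute", 3*time.Hour yields "3 hours", and a 10-day
+// duration yields "10 days".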
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%f years", d.Hours()/24/365) +} diff --git a/pkg/units/duration_test.go b/pkg/units/duration_test.go new file mode 100644 index 00000000..a2294740 --- /dev/null +++ b/pkg/units/duration_test.go @@ -0,0 +1,46 @@ +package units + +import ( + "testing" + "time" +) + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "8 weeks", HumanDuration(2*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month)) +} diff --git a/pkg/units/size.go b/pkg/units/size.go new file mode 100644 index 00000000..ea39bbdd --- /dev/null +++ b/pkg/units/size.go @@ -0,0 +1,81 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + 
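+// As a quick sketch of the two scales these constants drive (see the
+// parse helpers below): FromHumanSize("32k") yields 32 * 1000 bytes,
+// while RAMInBytes("32k") yields 32 * 1024 bytes.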
+type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var unitAbbrs = [...]string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. "44kB", "17MB") +func HumanSize(size int64) string { + i := 0 + sizef := float64(size) + for sizef >= 1000.0 { + sizef = sizef / 1000.0 + i++ + } + return fmt.Sprintf("%.4g %s", sizef, unitAbbrs[i]) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB") +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// Parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("Invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go new file mode 100644 index 00000000..8dae7e71 --- /dev/null +++ b/pkg/units/size_test.go @@ -0,0 +1,98 @@ +package units + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1 kB", HumanSize(1000)) + assertEquals(t, "1.024 kB", HumanSize(1024)) + assertEquals(t, "1 MB", HumanSize(1000000)) + assertEquals(t, "1.049 MB", HumanSize(1048576)) + assertEquals(t, "2 MB", HumanSize(2*MB)) + assertEquals(t, "3.42 GB", HumanSize(3.42*GB)) + assertEquals(t, "5.372 TB", HumanSize(5.372*TB)) + assertEquals(t, "2.22 PB", HumanSize(2.22*PB)) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, "32.3") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32.3Kb") + assertError(t, FromHumanSize, "32 mb") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
+	assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
+	assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
+	assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
+
+	assertError(t, RAMInBytes, "")
+	assertError(t, RAMInBytes, "hello")
+	assertError(t, RAMInBytes, "-32")
+	assertError(t, RAMInBytes, "32.3")
+	assertError(t, RAMInBytes, " 32 ")
+	assertError(t, RAMInBytes, "32.3Kb")
+	assertError(t, RAMInBytes, "32 mb")
+	assertError(t, RAMInBytes, "32m b")
+	assertError(t, RAMInBytes, "32bm")
+}
+
+func assertEquals(t *testing.T, expected, actual interface{}) {
+	if expected != actual {
+		t.Errorf("Expected '%v' but got '%v'", expected, actual)
+	}
+}
+
+// parseFn matches the signature of the size-parsing functions, as a testing abstraction
+type parseFn func(string) (int64, error)
+
+// String is defined for pretty-printing a parseFn in test failure messages
+func (fn parseFn) String() string {
+	fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+	return fnName[strings.LastIndex(fnName, ".")+1:]
+}
+
+func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err != nil || res != expected {
+		t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
+	}
+}
+
+func assertError(t *testing.T, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err == nil && res != -1 {
+		t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
+	}
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
new file mode 100644
index 00000000..6a7d6354
--- /dev/null
+++ b/pkg/version/version.go
@@ -0,0 +1,57 @@
+package version
+
+import (
+	"strconv"
+	"strings"
+)
+
+type Version string
+
+func (me Version) compareTo(other Version) int {
+	var (
+		meTab    = strings.Split(string(me), ".")
+		otherTab = strings.Split(string(other), ".")
+	)
+
+	max := len(meTab)
+	if len(otherTab) > max {
+		max = len(otherTab)
+	}
+	for i := 0; i < max; i++ {
+		var meInt, otherInt int
+
+		if len(meTab) > i {
+			meInt, _ = strconv.Atoi(meTab[i])
+		}
+		if len(otherTab) > i {
+			otherInt, _ = strconv.Atoi(otherTab[i])
+		}
+		if meInt > otherInt {
+			return 1
+		}
+		if otherInt > meInt {
+			return -1
+		}
+	}
+	return 0
+}
+
+func (me Version) LessThan(other Version) bool {
+	return me.compareTo(other) == -1
+}
+
+func (me Version) LessThanOrEqualTo(other Version) bool {
+	return me.compareTo(other) <= 0
+}
+
+func (me Version) GreaterThan(other Version) bool {
+	return me.compareTo(other) == 1
+}
+
+func (me Version) GreaterThanOrEqualTo(other Version) bool {
+	return me.compareTo(other) >= 0
+}
+
+func (me Version) Equal(other Version) bool {
+	return me.compareTo(other) == 0
+}
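+
+// exampleCompare is an illustrative sketch, not part of the original file:
+// compareTo treats missing components as zero and compares numerically, so
+// "1" equals "1.0.0" and "1.5" sorts before "1.10" (unlike a string compare).
+func exampleCompare() bool {
+	return Version("1.5").LessThan(Version("1.10")) // true: 5 < 10 numerically
+}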
diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go
new file mode 100644
index 00000000..c02ec40f
--- /dev/null
+++ b/pkg/version/version_test.go
@@ -0,0 +1,27 @@
+package version
+
+import (
+	"testing"
+)
+
+func assertVersion(t *testing.T, a, b string, result int) {
+	if r := Version(a).compareTo(Version(b)); r != result {
+		t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result)
+	}
+}
+
+func TestCompareVersion(t *testing.T) {
+	assertVersion(t, "1.12", "1.12", 0)
+	assertVersion(t, "1.0.0", "1", 0)
+	assertVersion(t, "1", "1.0.0", 0)
+	assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1)
+	assertVersion(t, "1", "1.0.1", -1)
+	assertVersion(t, "1.0.1", "1", 1)
+	assertVersion(t, "1.0.1", "1.0.2", -1)
+	assertVersion(t, "1.0.2", "1.0.3", -1)
+	assertVersion(t, "1.0.3", "1.1", -1)
+	assertVersion(t, "1.1", "1.1.1", -1)
+	assertVersion(t, "1.1.1", "1.1.2", -1)
+	assertVersion(t, "1.1.2", "1.2", -1)
+
+}
diff --git a/registry/MAINTAINERS b/registry/MAINTAINERS
new file mode 100644
index 00000000..fdb03ed5
--- /dev/null
+++ b/registry/MAINTAINERS
@@ -0,0 +1,5 @@
+Sam Alba (@samalba)
+Joffrey Fuhrer (@shin-)
+Ken Cochrane (@kencochrane)
+Vincent Batts (@vbatts)
+Olivier Gambier (@dmp42)
diff --git a/registry/auth.go b/registry/auth.go
new file mode 100644
index 00000000..dad58c16
--- /dev/null
+++ b/registry/auth.go
@@ -0,0 +1,310 @@
+package registry
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/docker/docker/utils"
+)
+
+const (
+	// Where we store the config file
+	CONFIGFILE = ".dockercfg"
+
+	// Only used for user auth + account creation
+	INDEXSERVER    = "https://index.docker.io/v1/"
+	REGISTRYSERVER = "https://registry-1.docker.io/v1/"
+
+	// INDEXSERVER = "https://registry-stage.hub.docker.com/v1/"
+)
+
+var (
+	ErrConfigFileMissing = errors.New("The Auth config file is missing")
+	IndexServerURL       *url.URL
+)
+
+func init() {
+	url, err := url.Parse(INDEXSERVER)
+	if err != nil {
+		panic(err)
+	}
+	IndexServerURL = url
+}
+
+type AuthConfig struct {
+	Username      string `json:"username,omitempty"`
+	Password      string `json:"password,omitempty"`
+	Auth          string `json:"auth"`
+	Email         string `json:"email"`
+	ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+type ConfigFile struct {
+	Configs  map[string]AuthConfig `json:"configs,omitempty"`
+	rootPath string
+}
+
+func IndexServerAddress() string {
+	return INDEXSERVER
+}
+
+// encodeAuth creates a base64-encoded auth string to store in the config
+func encodeAuth(authConfig *AuthConfig) string {
+	authStr := authConfig.Username + ":" + authConfig.Password
+	msg := []byte(authStr)
+	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
+	base64.StdEncoding.Encode(encoded, msg)
+	return string(encoded)
+}
+
+// decodeAuth decodes the auth string back into a username and password
+func decodeAuth(authStr string) (string, string, error) {
+	decLen := base64.StdEncoding.DecodedLen(len(authStr))
+	decoded := make([]byte, decLen)
+	authByte := []byte(authStr)
+	n, err := base64.StdEncoding.Decode(decoded, authByte)
+	if err != nil {
+		return "", "", err
+	}
+	if n > decLen {
+		return "", "", fmt.Errorf("Something went wrong decoding auth config")
+	}
+	arr := strings.SplitN(string(decoded), ":", 2)
+	if len(arr) != 2 {
+		return "", "", fmt.Errorf("Invalid auth configuration file")
+	}
+	password := strings.Trim(arr[1], "\x00")
+	return arr[0], password, nil
+}
+
+// LoadConfig loads the auth config information and returns the values
+// FIXME: use the internal Go config parser
+func LoadConfig(rootPath string) (*ConfigFile, error) {
+	configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath}
+	confFile := path.Join(rootPath, CONFIGFILE)
+	if _, err := os.Stat(confFile); err != nil {
+		return &configFile, nil // missing file is not an error
+	}
+	b, err := ioutil.ReadFile(confFile)
+	if err != nil {
+		return &configFile, err
+	}
+
+	if err := json.Unmarshal(b, &configFile.Configs); err != nil {
+		arr := strings.Split(string(b), "\n")
+		if len(arr) < 2 {
+			return &configFile, fmt.Errorf("The Auth config file is empty")
+		}
+		authConfig := AuthConfig{}
+		origAuth := strings.Split(arr[0], " = ")
+		if len(origAuth) != 2 {
+			return &configFile, fmt.Errorf("Invalid Auth config file")
+		}
+		authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
+		if err != nil {
+			return &configFile, err
+		}
+		origEmail := strings.Split(arr[1], " = ")
+		if len(origEmail) != 2 {
+			return &configFile, fmt.Errorf("Invalid Auth config file")
+		}
+		authConfig.Email = origEmail[1]
+		authConfig.ServerAddress = IndexServerAddress()
+		configFile.Configs[IndexServerAddress()] = authConfig
+	} else {
+		for k, authConfig := range configFile.Configs {
+			authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
+			if err != nil {
+				return &configFile, err
+			}
+			authConfig.Auth = ""
+			configFile.Configs[k] = authConfig
+			authConfig.ServerAddress = k
+		}
+	}
+	return &configFile, nil
+}
+
+// SaveConfig saves the auth config
+func SaveConfig(configFile *ConfigFile) error {
+	confFile := path.Join(configFile.rootPath, CONFIGFILE)
+	if len(configFile.Configs) == 0 {
+		os.Remove(confFile)
+		return nil
+	}
+
+	configs := make(map[string]AuthConfig, len(configFile.Configs))
+	for k, authConfig := range configFile.Configs {
+		authCopy := authConfig
+
+		authCopy.Auth = encodeAuth(&authCopy)
+		authCopy.Username = ""
+		authCopy.Password = ""
+		authCopy.ServerAddress = ""
+		configs[k] = authCopy
+	}
+
+	b, err := json.Marshal(configs)
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(confFile, b, 0600)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Login tries to register/login to the registry server
+func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) {
+	var (
+		status  string
+		reqBody []byte
+		err     error
+		client  = &http.Client{
+			Transport: &http.Transport{
+				DisableKeepAlives: true,
+				Proxy:             http.ProxyFromEnvironment,
+			},
+			CheckRedirect: AddRequiredHeadersToRedirectedRequests,
+		}
+		reqStatusCode = 0
+		serverAddress = authConfig.ServerAddress
+	)
+
+	if serverAddress == "" {
+		serverAddress = IndexServerAddress()
+	}
+
+	loginAgainstOfficialIndex := serverAddress == IndexServerAddress()
+
+	// The server address should not be sent back to the server, so strip it
+	// from a copy of the config before marshalling
+	authCopy := *authConfig
+	authCopy.ServerAddress = ""
+
+	jsonBody, err := json.Marshal(authCopy)
+	if err != nil {
+		return "", fmt.Errorf("Config Error: %s", err)
+	}
+
+	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
+	b := strings.NewReader(string(jsonBody))
+	req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
+	if err != nil {
+		return "", fmt.Errorf("Server Error: %s", err)
+	}
+	reqStatusCode = req1.StatusCode
+	defer req1.Body.Close()
+	reqBody, err = ioutil.ReadAll(req1.Body)
+	if err != nil {
+		return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err)
+	}
+
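+	// The server replies 201 when the account was just created; 400 when the
+	// username or email already exists (in which case a GET on users/ with
+	// basic auth verifies the credentials); and 401 on private registries
+	// where /v1/users/ itself is protected, so login doubles as an auth check.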
+	if reqStatusCode == 201 {
+		if loginAgainstOfficialIndex {
+			status = "Account created. Please use the confirmation link we sent" +
+				" to your e-mail to activate it."
+		} else {
+			status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
+		}
+	} else if reqStatusCode == 400 {
+		if string(reqBody) == "\"Username or email already exists\"" {
+			req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+			if err != nil {
+				return "", err
+			}
+			req.SetBasicAuth(authConfig.Username, authConfig.Password)
+			resp, err := client.Do(req)
+			if err != nil {
+				return "", err
+			}
+			defer resp.Body.Close()
+			body, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return "", err
+			}
+			if resp.StatusCode == 200 {
+				status = "Login Succeeded"
+			} else if resp.StatusCode == 401 {
+				return "", fmt.Errorf("Wrong login/password, please try again")
+			} else if resp.StatusCode == 403 {
+				if loginAgainstOfficialIndex {
+					return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
+				}
+				return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
+			} else {
+				return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
+			}
+		} else {
+			return "", fmt.Errorf("Registration: %s", reqBody)
+		}
+	} else if reqStatusCode == 401 {
+		// This case would happen with private registries where /v1/users is
+		// protected, so people can use `docker login` as an auth check.
+		req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+		if err != nil {
+			return "", err
+		}
+		req.SetBasicAuth(authConfig.Username, authConfig.Password)
+		resp, err := client.Do(req)
+		if err != nil {
+			return "", err
+		}
+		defer resp.Body.Close()
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", err
+		}
+		if resp.StatusCode == 200 {
+			status = "Login Succeeded"
+		} else if resp.StatusCode == 401 {
+			return "", fmt.Errorf("Wrong login/password, please try again")
+		} else {
+			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+				resp.StatusCode, resp.Header)
+		}
+	} else {
+		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
+	}
+	return status, nil
+}
+
+// ResolveAuthConfig matches an auth configuration to a server address or a URL
+func (config *ConfigFile) ResolveAuthConfig(hostname string) AuthConfig {
+	if hostname == IndexServerAddress() || len(hostname) == 0 {
+		// default to the index server
+		return config.Configs[IndexServerAddress()]
+	}
+
+	// First try the happy case
+	if c, found := config.Configs[hostname]; found {
+		return c
+	}
+
+	convertToHostname := func(url string) string {
+		stripped := url
+		if strings.HasPrefix(url, "http://") {
+			stripped = strings.Replace(url, "http://", "", 1)
+		} else if strings.HasPrefix(url, "https://") {
+			stripped = strings.Replace(url, "https://", "", 1)
+		}
+
+		nameParts := strings.SplitN(stripped, "/", 2)
+
+		return nameParts[0]
+	}
+
+	// Maybe they have a legacy config file, we will iterate the keys converting
+	// them to the new format and testing
+	normalizedHostname := convertToHostname(hostname)
+	for registry, config := range config.Configs {
+		if registryHostname := convertToHostname(registry); registryHostname == normalizedHostname {
+			return config
+		}
+	}
+
+	// When all else fails, return an empty auth config
+	return AuthConfig{}
+}
diff --git a/registry/auth_test.go b/registry/auth_test.go
new file mode 100644
index 00000000..3cb1a9ac
--- /dev/null
+++ b/registry/auth_test.go
@@ -0,0 +1,149 @@
+package registry
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func TestEncodeAuth(t *testing.T) {
+	newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
+	authStr := encodeAuth(newAuthConfig)
+	decAuthConfig :=
&AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + if err != nil { + t.Fatal(err) + } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} + +func setupTempConfigFile() (*ConfigFile, error) { + root, err := ioutil.TempDir("", "docker-test-auth") + if err != nil { + return nil, err + } + configFile := &ConfigFile{ + rootPath: root, + Configs: make(map[string]AuthConfig), + } + + for _, registry := range []string{"testIndex", IndexServerAddress()} { + configFile.Configs[registry] = AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + Email: "docker@docker.io", + } + } + + return configFile, nil +} + +func TestSameAuthDataPostSave(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + err = SaveConfig(configFile) + if err != nil { + t.Fatal(err) + } + + authConfig := configFile.Configs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Email != "docker@docker.io" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + for _, registry := range []string{"", IndexServerAddress()} { + resolved := configFile.ResolveAuthConfig(registry) + if resolved != configFile.Configs[IndexServerAddress()] { + t.Fail() + } + } +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + registryAuth := AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + Email: "foo@example.com", + } + localAuth := AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + Email: "bar@example.com", + } + configFile.Configs["https://registry.example.com/v1/"] = registryAuth + configFile.Configs["http://localhost:8000/v1/"] = localAuth + configFile.Configs["registry.com"] = registryAuth + + validRegistries := map[string][]string{ + "https://registry.example.com/v1/": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "http://localhost:8000/v1/": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + for _, registry := range registries { + var ( + configured AuthConfig + ok bool + ) + resolved := configFile.ResolveAuthConfig(registry) + if configured, ok = configFile.Configs[configKey]; !ok { + t.Fail() + } + if resolved.Email != configured.Email { + t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) + } + } + } +} diff --git a/registry/endpoint.go b/registry/endpoint.go new file mode 100644 index 00000000..d65fd7e8 --- /dev/null +++ b/registry/endpoint.go @@ -0,0 +1,213 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" 
+ "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/pkg/log" +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. +func scanForApiVersion(hostname string) (string, APIVersion) { + var ( + chunks []string + apiVersionStr string + ) + if strings.HasSuffix(hostname, "/") { + chunks = strings.Split(hostname[:len(hostname)-1], "/") + apiVersionStr = chunks[len(chunks)-1] + } else { + chunks = strings.Split(hostname, "/") + apiVersionStr = chunks[len(chunks)-1] + } + for k, v := range apiVersions { + if apiVersionStr == v { + hostname = strings.Join(chunks[:len(chunks)-1], "/") + return hostname, k + } + } + return hostname, DefaultAPIVersion +} + +func NewEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { + endpoint, err := newEndpoint(hostname, insecureRegistries) + if err != nil { + return nil, err + } + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + + //TODO: triggering highland build can be done there without "failing" + + if endpoint.secure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return nil, fmt.Errorf("Invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + _, err2 := endpoint.Ping() + if err2 == nil { + return endpoint, nil + } + + return nil, fmt.Errorf("Invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) + } + + return endpoint, nil +} +func newEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { + var ( + endpoint = Endpoint{} + trimmedHostname string + err error + ) + if !strings.HasPrefix(hostname, "http") { + hostname = "https://" + hostname + } + trimmedHostname, endpoint.Version = scanForApiVersion(hostname) + endpoint.URL, err = url.Parse(trimmedHostname) + if err != nil { + return nil, err + } + endpoint.secure, err = isSecure(endpoint.URL.Host, insecureRegistries) + if err != nil { + return nil, err + } + return &endpoint, nil +} + +type Endpoint struct { + URL *url.URL + Version APIVersion + secure bool +} + +// Get the formated URL for the root of this registry Endpoint +func (e Endpoint) String() string { + return fmt.Sprintf("%s/v%d/", e.URL.String(), e.Version) +} + +func (e Endpoint) VersionString(version APIVersion) string { + return fmt.Sprintf("%s/v%d/", e.URL.String(), version) +} + +func (e Endpoint) Ping() (RegistryInfo, error) { + if e.String() == IndexServerAddress() { + // Skip the check, we now this one is valid + // (and we never want to fallback to http in case of error) + return RegistryInfo{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.String()+"_ping", nil) + if err != nil { + return RegistryInfo{Standalone: false}, err + } + + resp, _, err := doRequest(req, nil, ConnectTimeout, e.secure) + if err != nil { + return RegistryInfo{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return RegistryInfo{Standalone: false}, fmt.Errorf("Error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := RegistryInfo{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + log.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + log.Debugf("RegistryInfo.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + log.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + log.Debugf("RegistryInfo.Standalone: %t", info.Standalone) + return info, nil +} + +// isSecure returns false if the provided hostname is part of the list of insecure registries. +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by hostname, the latter is considered +// insecure. +// +// hostname should be a URL.Host (`host:port` or `host`) +func isSecure(hostname string, insecureRegistries []string) (bool, error) { + if hostname == IndexServerURL.Host { + return true, nil + } + + host, _, err := net.SplitHostPort(hostname) + if err != nil { + // assume hostname is of the form `host` without the port and go on. 
+		host = hostname
+	}
+	addrs, err := lookupIP(host)
+	if err != nil {
+		ip := net.ParseIP(host)
+		if ip == nil {
+			// if resolving `host` fails, error out, since host is to be net.Dial-ed anyway
+			return true, fmt.Errorf("issecure: could not resolve %q: %v", host, err)
+		}
+		addrs = []net.IP{ip}
+	}
+	if len(addrs) == 0 {
+		return true, fmt.Errorf("issecure: could not resolve %q", host)
+	}
+
+	for _, addr := range addrs {
+		for _, r := range insecureRegistries {
+			// hostname matches insecure registry
+			if hostname == r {
+				return false, nil
+			}
+
+			// now assume a CIDR was passed to --insecure-registry
+			_, ipnet, err := net.ParseCIDR(r)
+			if err != nil {
+				// if the entry could not be parsed as a CIDR, assume it's
+				// not one and go on with the next candidate
+				continue
+			}
+
+			// check if the addr falls in the subnet
+			if ipnet.Contains(addr) {
+				return false, nil
+			}
+		}
+	}
+
+	return true, nil
+}
diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go
new file mode 100644
index 00000000..54105ec1
--- /dev/null
+++ b/registry/endpoint_test.go
@@ -0,0 +1,27 @@
+package registry
+
+import "testing"
+
+func TestEndpointParse(t *testing.T) {
+	testData := []struct {
+		str      string
+		expected string
+	}{
+		{IndexServerAddress(), IndexServerAddress()},
+		{"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"},
+		{"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"},
+	}
+	for _, td := range testData {
+		e, err := newEndpoint(td.str, insecureRegistries)
+		if err != nil {
+			t.Errorf("%q: %s", td.str, err)
+		}
+		if e == nil {
+			t.Logf("something's fishy, endpoint for %q is nil", td.str)
+			continue
+		}
+		if e.String() != td.expected {
+			t.Errorf("expected %q, got %q", td.expected, e.String())
+		}
+	}
+}
diff --git a/registry/httpfactory.go b/registry/httpfactory.go
new file mode 100644
index 00000000..4c784360
--- /dev/null
+++ b/registry/httpfactory.go
@@ -0,0 +1,46 @@
+package registry
+
+import (
+	"runtime"
+
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/utils"
+)
+
+func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
+	// FIXME: this replicates the 'info' job.
+	httpVersion := make([]utils.VersionInfo, 0, 4)
+	httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH})
+	ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
+	md := &utils.HTTPMetaHeadersDecorator{
+		Headers: metaHeaders,
+	}
+	factory := utils.NewHTTPRequestFactory(ud, md)
+	return factory
+}
+
+// simpleVersionInfo is a simple implementation of
+// the interface VersionInfo, which is used
+// to provide version information for some product,
+// component, etc. It stores the product name and the version
+// as strings and returns them on calls to Name() and Version().
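+// Instances are assembled in HTTPRequestFactory above and serialized into
+// the User-Agent header by the decorator.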
+type simpleVersionInfo struct { + name string + version string +} + +func (v *simpleVersionInfo) Name() string { + return v.name +} + +func (v *simpleVersionInfo) Version() string { + return v.version +} diff --git a/registry/registry.go b/registry/registry.go new file mode 100644 index 00000000..e0285a23 --- /dev/null +++ b/registry/registry.go @@ -0,0 +1,253 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" +) + +var ( + ErrAlreadyExists = errors.New("Image already exists") + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + ErrDoesNotExist = errors.New("Image does not exist") + errLoginRequired = errors.New("Authentication is required.") + validNamespace = regexp.MustCompile(`^([a-z0-9_]{4,30})$`) + validRepo = regexp.MustCompile(`^([a-z0-9-_.]+)$`) +) + +type TimeoutType uint32 + +const ( + NoTimeout TimeoutType = iota + ReceiveTimeout + ConnectTimeout +) + +func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType, secure bool) *http.Client { + tlsConfig := tls.Config{ + RootCAs: roots, + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + } + + if cert != nil { + tlsConfig.Certificates = append(tlsConfig.Certificates, *cert) + } + + if !secure { + tlsConfig.InsecureSkipVerify = true + } + + httpTransport := &http.Transport{ + DisableKeepAlives: true, + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tlsConfig, + } + + switch timeout { + case ConnectTimeout: + httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { + // Set the connect timeout to 5 seconds + conn, err := net.DialTimeout(proto, addr, 5*time.Second) + if err != nil { + return nil, err + } + // Set the recv timeout to 10 seconds + conn.SetDeadline(time.Now().Add(10 * time.Second)) + return conn, nil + } + case ReceiveTimeout: + httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { + conn, err := net.Dial(proto, addr) + if err != nil { + return nil, err + } + conn = utils.NewTimeoutConn(conn, 1*time.Minute) + return conn, nil + } + } + + return &http.Client{ + Transport: httpTransport, + CheckRedirect: AddRequiredHeadersToRedirectedRequests, + Jar: jar, + } +} + +func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) { + var ( + pool *x509.CertPool + certs []*tls.Certificate + ) + + if secure && req.URL.Scheme == "https" { + hasFile := func(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false + } + + hostDir := path.Join("/etc/docker/certs.d", req.URL.Host) + log.Debugf("hostDir: %s", hostDir) + fs, err := ioutil.ReadDir(hostDir) + if err != nil && !os.IsNotExist(err) { + return nil, nil, err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if pool == nil { + pool = x509.NewCertPool() + } + log.Debugf("crt: %s", hostDir+"/"+f.Name()) + data, err := ioutil.ReadFile(path.Join(hostDir, f.Name())) + if err != nil { + return nil, nil, err + } + pool.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + log.Debugf("cert: %s", hostDir+"/"+f.Name()) + if !hasFile(fs, keyName) { + return nil, nil, fmt.Errorf("Missing 
key %s for certificate %s", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName))
+			if err != nil {
+				return nil, nil, err
+			}
+			certs = append(certs, &cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			log.Debugf("key: %s", hostDir+"/"+f.Name())
+			if !hasFile(fs, certName) {
+				return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+	}
+
+	if len(certs) == 0 {
+		client := newClient(jar, pool, nil, timeout, secure)
+		res, err := client.Do(req)
+		if err != nil {
+			return nil, nil, err
+		}
+		return res, client, nil
+	}
+
+	for i, cert := range certs {
+		client := newClient(jar, pool, cert, timeout, secure)
+		res, err := client.Do(req)
+		// Return the response if this is the last cert or if it is conclusive;
+		// otherwise move on to the next cert (on an error, a 403 or a 5xx)
+		if i == len(certs)-1 || err == nil && res.StatusCode != 403 && res.StatusCode < 500 {
+			return res, client, err
+		}
+	}
+
+	return nil, nil, nil
+}
+
+func validateRepositoryName(repositoryName string) error {
+	var (
+		namespace string
+		name      string
+	)
+	nameParts := strings.SplitN(repositoryName, "/", 2)
+	if len(nameParts) < 2 {
+		namespace = "library"
+		name = nameParts[0]
+
+		// the repository name must not be a valid image ID
+		if err := utils.ValidateID(name); err == nil {
+			return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
+		}
+	} else {
+		namespace = nameParts[0]
+		name = nameParts[1]
+	}
+	if !validNamespace.MatchString(namespace) {
+		return fmt.Errorf("Invalid namespace name (%s), only [a-z0-9_] are allowed, size between 4 and 30", namespace)
+	}
+	if !validRepo.MatchString(name) {
+		return fmt.Errorf("Invalid repository name (%s), only [a-z0-9-_.] are allowed", name)
+	}
+	return nil
+}
+
+// ResolveRepositoryName resolves a repository name to a hostname + name
+func ResolveRepositoryName(reposName string) (string, string, error) {
+	if strings.Contains(reposName, "://") {
+		// It cannot contain a scheme!
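+		// (e.g. "https://github.com/docker/docker" is rejected; see
+		// TestResolveRepositoryName in registry_test.go)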
+ return "", "", ErrInvalidRepositoryName + } + nameParts := strings.SplitN(reposName, "/", 2) + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && + nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + err := validateRepositoryName(reposName) + return IndexServerAddress(), reposName, err + } + hostname := nameParts[0] + reposName = nameParts[1] + if strings.Contains(hostname, "index.docker.io") { + return "", "", fmt.Errorf("Invalid repository name, try \"%s\" instead", reposName) + } + if err := validateRepositoryName(reposName); err != nil { + return "", "", err + } + + return hostname, reposName, nil +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +func AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go new file mode 100644 index 00000000..50724f0f --- /dev/null +++ b/registry/registry_mock_test.go @@ -0,0 +1,394 @@ +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + + "github.com/docker/docker/pkg/log" +) + +var ( + testHTTPServer *httptest.Server + insecureRegistries []string + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 
0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + 
r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + URL, err := url.Parse(testHTTPServer.URL) + if err != nil { + panic(err) + } + insecureRegistries = []string{URL.Host} + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layer_size := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layer_size)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + image_id := vars["image_id"] + action := vars["action"] + layer, exists := 
testLayers[image_id]
+	if !exists {
+		if action != "json" {
+			http.NotFound(w, r)
+			return
+		}
+		layer = make(map[string]string)
+		testLayers[image_id] = layer
+	}
+	if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" {
+		if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] {
+			apiError(w, "Wrong checksum", 400)
+			return
+		}
+	}
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		apiError(w, fmt.Sprintf("Error: %s", err), 500)
+		return
+	}
+	layer[action] = string(body)
+	writeResponse(w, true, 200)
+}
+
+func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	repositoryName := mux.Vars(r)["repository"]
+	tags, exists := testRepositories[repositoryName]
+	if !exists {
+		apiError(w, "Repository not found", 404)
+		return
+	}
+	if r.Method == "DELETE" {
+		delete(testRepositories, repositoryName)
+		writeResponse(w, true, 200)
+		return
+	}
+	writeResponse(w, tags, 200)
+}
+
+func handlerGetTag(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	repositoryName := vars["repository"]
+	tagName := vars["tag"]
+	tags, exists := testRepositories[repositoryName]
+	if !exists {
+		apiError(w, "Repository not found", 404)
+		return
+	}
+	tag, exists := tags[tagName]
+	if !exists {
+		apiError(w, "Tag not found", 404)
+		return
+	}
+	writeResponse(w, tag, 200)
+}
+
+func handlerPutTag(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	repositoryName := vars["repository"]
+	tagName := vars["tag"]
+	tags, exists := testRepositories[repositoryName]
+	if !exists {
+		// assign to the outer tags (the original shadowed it with :=, which
+		// left the outer map nil and would panic on the write below)
+		tags = make(map[string]string)
+		testRepositories[repositoryName] = tags
+	}
+	tagValue := ""
+	readJSON(r, &tagValue)
+	tags[tagName] = tagValue
+	writeResponse(w, true, 200)
+}
+
+func handlerUsers(w http.ResponseWriter, r *http.Request) {
+	code := 200
+	if r.Method == "POST" {
+		code = 201
+	} else if r.Method == "PUT" {
+		code = 204
+	}
+	writeResponse(w, "", code)
+}
+
+func handlerImages(w http.ResponseWriter, r *http.Request) {
+	u, _ := url.Parse(testHTTPServer.URL)
+	w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com"))
+	w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()))
+	if r.Method == "PUT" {
+		if strings.HasSuffix(r.URL.Path, "images") {
+			writeResponse(w, "", 204)
+			return
+		}
+		writeResponse(w, "", 200)
+		return
+	}
+	if r.Method == "DELETE" {
+		writeResponse(w, "", 204)
+		return
+	}
+	images := []map[string]string{}
+	for image_id, layer := range testLayers {
+		image := make(map[string]string)
+		image["id"] = image_id
+		image["checksum"] = layer["checksum_tarsum"]
+		image["Tag"] = "latest"
+		images = append(images, image)
+	}
+	writeResponse(w, images, 200)
+}
+
+func handlerAuth(w http.ResponseWriter, r *http.Request) {
+	writeResponse(w, "OK", 200)
+}
+
+func handlerSearch(w http.ResponseWriter, r *http.Request) {
+	result := &SearchResults{
+		Query:      "fakequery",
+		NumResults: 1,
+		Results:    []SearchResult{{Name: "fakeimage", StarCount: 42}},
+	}
+	writeResponse(w, result, 200)
+}
+
+func TestPing(t *testing.T) {
+	res, err := http.Get(makeURL("/v1/_ping"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, res.StatusCode, 200, "")
+	assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock",
+		"This is not a Mocked Registry")
+}
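+
+// TestMockLookupIP is an illustrative addition, not part of the original
+// file: it shows that the lookupIP override in init() resolves names from
+// mockHosts, which is what lets the isSecure tests run deterministically.
+func TestMockLookupIP(t *testing.T) {
+	addrs, err := lookupIP("example.com")
+	if err != nil || len(addrs) != 1 || addrs[0].String() != "42.42.42.42" {
+		t.Fatalf("expected mock lookup of example.com to yield 42.42.42.42, got %v (err: %v)", addrs, err)
+	}
+}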
+
+/* Uncomment this to test Mocked Registry locally with curl
+ * WARNING: Don't push with this test uncommented; it'll block the tests
+ *
+func TestWait(t *testing.T) {
+	log.Println("Test HTTP server ready and waiting:", testHTTPServer.URL)
+	c := make(chan int)
+	<-c
+}
+
+//*/
diff --git a/registry/registry_test.go b/registry/registry_test.go
new file mode 100644
index 00000000..1ffb44f3
--- /dev/null
+++ b/registry/registry_test.go
@@ -0,0 +1,355 @@
+package registry
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/utils"
+)
+
+var (
+	IMAGE_ID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d"
+	TOKEN    = []string{"fake-token"}
+	REPO     = "foo42/bar"
+)
+
+func spawnTestRegistrySession(t *testing.T) *Session {
+	authConfig := &AuthConfig{}
+	endpoint, err := NewEndpoint(makeURL("/v1/"), insecureRegistries)
+	if err != nil {
+		t.Fatal(err)
+	}
+	r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return r
+}
+
+func TestPingRegistryEndpoint(t *testing.T) {
+	ep, err := NewEndpoint(makeURL("/v1/"), insecureRegistries)
+	if err != nil {
+		t.Fatal(err)
+	}
+	regInfo, err := ep.Ping()
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, regInfo.Standalone, true, "Expected standalone to be true (default)")
+}
+
+func TestGetRemoteHistory(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(hist), 2, "Expected 2 images in history")
+	assertEqual(t, hist[0], IMAGE_ID, "Expected "+IMAGE_ID+" as first ancestry")
+	assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		"Unexpected second ancestry")
+}
+
+func TestLookupRemoteImage(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN)
+	assertEqual(t, found, true, "Expected remote lookup to succeed")
+	found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN)
+	assertEqual(t, found, false, "Expected remote lookup to fail")
+}
+
+func TestGetRemoteImageJSON(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, size, 154, "Expected size 154")
+	if len(json) <= 0 {
+		t.Fatal("Expected non-empty json")
+	}
+
+	_, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), TOKEN)
+	if err == nil {
+		t.Fatal("Expected image not found error")
+	}
+}
+
+func TestGetRemoteImageLayer(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if data == nil {
+		t.Fatal("Expected non-nil data result")
+	}
+
+	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN, 0)
+	if err == nil {
+		t.Fatal("Expected image not found error")
+	}
+}
+
+func TestGetRemoteTags(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(tags), 1, "Expected one tag")
+	assertEqual(t, tags["latest"], IMAGE_ID, "Expected tag latest to map to "+IMAGE_ID)
+
+	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", TOKEN)
+	if err == nil {
+		t.Fatal("Expected error when fetching tags for bogus repo")
+	}
+}
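+
+// Note on the test below: handlerImages in registry_mock_test.go sends
+// "X-Docker-Endpoints: <host> , test.example.com " (with stray spaces), so
+// this also exercises whitespace trimming and the normalization of bare
+// hosts into full "http://<host>/v1/" endpoint URLs.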
"http://" + parsedUrl.Host + "/v1/" + data, err := r.GetRepositoryData("foo42/bar") + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", + fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + +} + +func TestPushImageJSONRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := &ImgData{ + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + } + + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), TOKEN) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageLayerRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + layer := strings.NewReader("") + _, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestResolveRepositoryName(t *testing.T) { + _, _, err := ResolveRepositoryName("https://github.com/docker/docker") + assertEqual(t, err, ErrInvalidRepositoryName, "Expected error invalid repo name") + ep, repo, err := ResolveRepositoryName("fooo/bar") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be index server address") + assertEqual(t, repo, "fooo/bar", "Expected resolved repo to be foo/bar") + + u := makeURL("")[7:] + ep, repo, err = ResolveRepositoryName(u + "/private/moonbase") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, u, "Expected endpoint to be "+u) + assertEqual(t, repo, "private/moonbase", "Expected endpoint to be private/moonbase") + + ep, repo, err = ResolveRepositoryName("ubuntu-12.04-base") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be "+IndexServerAddress()) + assertEqual(t, repo, "ubuntu-12.04-base", "Expected endpoint to be ubuntu-12.04-base") +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery") + if err != nil { + t.Fatal(err) + } + if 
+
+func TestSearchRepositories(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	results, err := r.SearchRepositories("fakequery")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if results == nil {
+		t.Fatal("Expected non-nil SearchResults object")
+	}
+	assertEqual(t, results.NumResults, 1, "Expected 1 search result")
+	assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
+	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
+}
+
+func TestValidRepositoryName(t *testing.T) {
+	if err := validateRepositoryName("docker/docker"); err != nil {
+		t.Fatal(err)
+	}
+	// Support 64-byte non-hexadecimal names (hexadecimal names are forbidden)
+	if err := validateRepositoryName("thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev"); err != nil {
+		t.Fatal(err)
+	}
+	if err := validateRepositoryName("docker/Docker"); err == nil {
+		t.Log("Repository name should be invalid")
+		t.Fail()
+	}
+	if err := validateRepositoryName("docker///docker"); err == nil {
+		t.Log("Repository name should be invalid")
+		t.Fail()
+	}
+	if err := validateRepositoryName("1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a"); err == nil {
+		t.Log("Repository name should be invalid, 64-byte hexadecimal names forbidden")
+		t.Fail()
+	}
+}
+
+func TestTrustedLocation(t *testing.T) {
+	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == true {
+			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
+		}
+	}
+
+	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == false {
+			t.Fatalf("'%s' should be detected as a trusted location", url)
+		}
+	}
+}
+
+func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) {
+	for _, urls := range [][]string{
+		{"http://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "http://bar.docker.com"},
+		{"https://foo.docker.io", "https://example.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 1 {
+			t.Fatalf("Expected 1 header, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "" {
+			t.Fatal("'Authorization' should be empty")
+		}
+	}
+
+	for _, urls := range [][]string{
+		{"https://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "https://bar.docker.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 2 {
+			t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "super_secret" {
+			t.Fatal("'Authorization' should be 'super_secret'")
+		}
+	}
+}
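+
+// Note on the table below: the loop appends "127.0.0.0/8" to every entry's
+// insecureRegistries (see the TODO inside it), which is why the bare
+// "localhost" and "127.0.0.1" cases expect false even with a nil list.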
true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + } + for _, tt := range tests { + // TODO: remove this once we remove localhost insecure by default + insecureRegistries := append(tt.insecureRegistries, "127.0.0.0/8") + if sec, err := isSecure(tt.addr, insecureRegistries); err != nil || sec != tt.expected { + t.Fatalf("isSecure failed for %q %v, expected %v got %v. Error: %v", tt.addr, insecureRegistries, tt.expected, sec, err) + } + } +} diff --git a/registry/service.go b/registry/service.go new file mode 100644 index 00000000..53e8278b --- /dev/null +++ b/registry/service.go @@ -0,0 +1,118 @@ +package registry + +import ( + "github.com/docker/docker/engine" +) + +// Service exposes registry capabilities in the standard Engine +// interface. Once installed, it extends the engine with the +// following calls: +// +// 'auth': Authenticate against the public registry +// 'search': Search for images on the public registry +// 'pull': Download images from any registry (TODO) +// 'push': Upload images to any registry (TODO) +type Service struct { + insecureRegistries []string +} + +// NewService returns a new instance of Service ready to be +// installed no an engine. +func NewService(insecureRegistries []string) *Service { + return &Service{ + insecureRegistries: insecureRegistries, + } +} + +// Install installs registry capabilities to eng. +func (s *Service) Install(eng *engine.Engine) error { + eng.Register("auth", s.Auth) + eng.Register("search", s.Search) + return nil +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was sucessful. +// It can be used to verify the validity of a client's credentials. +func (s *Service) Auth(job *engine.Job) engine.Status { + var authConfig = new(AuthConfig) + + job.GetenvJson("authConfig", authConfig) + + if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { + endpoint, err := NewEndpoint(addr, s.insecureRegistries) + if err != nil { + return job.Error(err) + } + if _, err := endpoint.Ping(); err != nil { + return job.Error(err) + } + authConfig.ServerAddress = endpoint.String() + } + + status, err := Login(authConfig, HTTPRequestFactory(nil)) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", status) + + return engine.StatusOK +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +// +// Argument syntax: search TERM +// +// Option environment: +// 'authConfig': json-encoded credentials to authenticate against the registry. +// The search extends to images only accessible via the credentials. 
+// +// 'metaHeaders': extra HTTP headers to include in the request to the registry. +// The headers should be passed as a json-encoded dictionary. +// +// Output: +// Results are sent as a collection of structured messages (using engine.Table). +// Each result is sent as a separate message. +// Results are ordered by number of stars on the public registry. +func (s *Service) Search(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s TERM", job.Name) + } + var ( + term = job.Args[0] + metaHeaders = map[string][]string{} + authConfig = &AuthConfig{} + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) + + hostname, term, err := ResolveRepositoryName(term) + if err != nil { + return job.Error(err) + } + + endpoint, err := NewEndpoint(hostname, s.insecureRegistries) + if err != nil { + return job.Error(err) + } + r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true) + if err != nil { + return job.Error(err) + } + results, err := r.SearchRepositories(term) + if err != nil { + return job.Error(err) + } + outs := engine.NewTable("star_count", 0) + for _, result := range results.Results { + out := &engine.Env{} + out.Import(result) + outs.Add(out) + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/registry/session.go b/registry/session.go new file mode 100644 index 00000000..28959967 --- /dev/null +++ b/registry/session.go @@ -0,0 +1,617 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/utils" +) + +type Session struct { + authConfig *AuthConfig + reqFactory *utils.HTTPRequestFactory + indexEndpoint *Endpoint + jar *cookiejar.Jar + timeout TimeoutType +} + +func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) { + r = &Session{ + authConfig: authConfig, + indexEndpoint: endpoint, + } + + if timeout { + r.timeout = ReceiveTimeout + } + + r.jar, err = cookiejar.New(nil) + if err != nil { + return nil, err + } + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside our requests. + if r.indexEndpoint.VersionString(1) != IndexServerAddress() && r.indexEndpoint.URL.Scheme == "https" { + info, err := r.indexEndpoint.Ping() + if err != nil { + return nil, err + } + if info.Standalone { + log.Debugf("Endpoint %s is eligible for private registry registry. Enabling decorator.", r.indexEndpoint.String()) + dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) + factory.AddDecorator(dec) + } + } + + r.reqFactory = factory + return r, nil +} + +func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) { + return doRequest(req, r.jar, r.timeout, r.indexEndpoint.secure) +} + +// Retrieve the history of a given image from the Registry. 
+// Return a list of the parent's json (requested image included) +func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) { + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("Error while reading the http response: %s", err) + } + + log.Debugf("Ancestry: %s", jsonString) + history := new([]string) + if err := json.Unmarshal(jsonString, history); err != nil { + return nil, err + } + return *history, nil +} + +// Check if an image exists in the Registry +// TODO: This method should return the errors instead of masking them and returning false +func (r *Session) LookupRemoteImage(imgID, registry string, token []string) bool { + + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) + if err != nil { + log.Errorf("Error in LookupRemoteImage %s", err) + return false + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + log.Errorf("Error in LookupRemoteImage %s", err) + return false + } + res.Body.Close() + return res.StatusCode == 200 +} + +// Retrieve an image from the Registry. +func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) { + // Get the JSON + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := -1 + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.Atoi(hdr) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) { + var ( + retries = 5 + statusCode = 0 + client *http.Client + res *http.Response + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := r.reqFactory.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %s\n", err) + } + setTokenAuth(req, token) + for i := 1; i <= retries; i++ { + statusCode = 0 + res, client, err = r.doRequest(req) + if err != nil { + log.Debugf("Error contacting registry: %s", err) + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + if i == retries { + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + time.Sleep(time.Duration(i) * 5 * time.Second) + continue + } + break + } + + if 
res.StatusCode != 200 {
+ res.Body.Close()
+ return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
+ res.StatusCode, imgID)
+ }
+
+ if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
+ log.Debugf("server supports resume")
+ return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil
+ }
+ log.Debugf("server doesn't support resume")
+ return res.Body, nil
+}
+
+func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
+ if strings.Count(repository, "/") == 0 {
+ // This will be removed once the Registry supports auto-resolution on
+ // the "library" namespace
+ repository = "library/" + repository
+ }
+ for _, host := range registries {
+ endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
+ req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
+
+ if err != nil {
+ return nil, err
+ }
+ setTokenAuth(req, token)
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+ defer res.Body.Close()
+
+ if res.StatusCode != 200 && res.StatusCode != 404 {
+ continue
+ } else if res.StatusCode == 404 {
+ return nil, fmt.Errorf("Repository not found")
+ }
+
+ result := make(map[string]string)
+ rawJSON, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(rawJSON, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+ return nil, fmt.Errorf("Could not reach any registry endpoint")
+}
+
+func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
+ var endpoints []string
+ parsedUrl, err := url.Parse(indexEp)
+ if err != nil {
+ return nil, err
+ }
+ var urlScheme = parsedUrl.Scheme
+ // The Registry's URL scheme has to match the Index's
+ for _, ep := range headers {
+ epList := strings.Split(ep, ",")
+ for _, epListElement := range epList {
+ endpoints = append(
+ endpoints,
+ fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
+ }
+ }
+ return endpoints, nil
+}
+
+func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
+ repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote)
+
+ log.Debugf("[registry] Calling GET %s", repositoryTarget)
+
+ req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
+ if err != nil {
+ return nil, err
+ }
+ if r.authConfig != nil && len(r.authConfig.Username) > 0 {
+ req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+ }
+ req.Header.Set("X-Docker-Token", "true")
+
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == 401 {
+ return nil, errLoginRequired
+ }
+ // TODO: Right now we're ignoring checksums in the response body.
+ // In the future, we need to use them to check image validity.
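+ // For example (hostnames hypothetical, not from this code): when the index
+ // was reached over https, a response header such as
+ //
+ // X-Docker-Endpoints: reg1.example.com, reg2.example.com
+ //
+ // is expanded by buildEndpointsList above into
+ //
+ // ["https://reg1.example.com/v1/", "https://reg2.example.com/v1/"]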
+ if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } + + var tokens []string + if res.Header.Get("X-Docker-Token") != "" { + tokens = res.Header["X-Docker-Token"] + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + checksumsJSON, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + remoteChecksums := []*ImgData{} + if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + Tokens: tokens, + }, nil +} + +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum") + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil) + if err != nil { + return err + } + setTokenAuth(req, token) + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, _, err := r.doRequest(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody) + } + return nil +} + +// Push a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json") + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + setTokenAuth(req, token) + + res, _, err := r.doRequest(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if 
jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res) + } + return nil +} + +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %s", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// push a tag on the registry. 
+// Remote has the format '<user>/<repo>'
+func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
+ // "jsonify" the string
+ revision = "\"" + revision + "\""
+ path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
+
+ req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Content-type", "application/json")
+ setTokenAuth(req, token)
+ req.ContentLength = int64(len(revision))
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return err
+ }
+ res.Body.Close()
+ if res.StatusCode != 200 && res.StatusCode != 201 {
+ return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
+ }
+ return nil
+}
+
+func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+ cleanImgList := []*ImgData{}
+
+ if validate {
+ for _, elem := range imgList {
+ if elem.Checksum != "" {
+ cleanImgList = append(cleanImgList, elem)
+ }
+ }
+ } else {
+ cleanImgList = imgList
+ }
+
+ imgListJSON, err := json.Marshal(cleanImgList)
+ if err != nil {
+ return nil, err
+ }
+ var suffix string
+ if validate {
+ suffix = "images"
+ }
+ u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix)
+ log.Debugf("[registry] PUT %s", u)
+ log.Debugf("Image list pushed to index:\n%s", imgListJSON)
+ req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("Content-type", "application/json")
+ req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+ req.ContentLength = int64(len(imgListJSON))
+ req.Header.Set("X-Docker-Token", "true")
+ if validate {
+ req.Header["X-Docker-Endpoints"] = regs
+ }
+
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+
+ // Redirect if necessary
+ for res.StatusCode >= 300 && res.StatusCode < 400 {
+ log.Debugf("Redirected to %s", res.Header.Get("Location"))
+ req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
+ if err != nil {
+ return nil, err
+ }
+ req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+ req.ContentLength = int64(len(imgListJSON))
+ req.Header.Set("X-Docker-Token", "true")
+ if validate {
+ req.Header["X-Docker-Endpoints"] = regs
+ }
+ // Assign (=) rather than declare (:=) here, so the response checked by
+ // the loop condition is the redirected one, not a shadowed copy.
+ res, _, err = r.doRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ }
+
+ var tokens, endpoints []string
+ if !validate {
+ if res.StatusCode != 200 && res.StatusCode != 201 {
+ errBody, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res)
+ }
+ if res.Header.Get("X-Docker-Token") != "" {
+ tokens = res.Header["X-Docker-Token"]
+ log.Debugf("Auth token: %v", tokens)
+ } else {
+ return nil, fmt.Errorf("Index response didn't contain an access token")
+ }
+
+ if res.Header.Get("X-Docker-Endpoints") != "" {
+ endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("Index response didn't contain any endpoints")
+ }
+ }
+ if validate {
+ if res.StatusCode != 204 {
+ errBody, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody), res)
+ }
+ }
+
+ return &RepositoryData{
+ Tokens: tokens,
+ Endpoints: endpoints,
+ }, nil
+}
+
+func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
+ log.Debugf("Index server: %s", r.indexEndpoint)
+ u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term)
+ req, err := r.reqFactory.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ if r.authConfig != nil && len(r.authConfig.Username) > 0 {
+ req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+ }
+ req.Header.Set("X-Docker-Token", "true")
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
+ }
+ rawData, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ result := new(SearchResults)
+ err = json.Unmarshal(rawData, result)
+ return result, err
+}
+
+func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig {
+ password := ""
+ if withPasswd {
+ password = r.authConfig.Password
+ }
+ return &AuthConfig{
+ Username: r.authConfig.Username,
+ Password: password,
+ Email: r.authConfig.Email,
+ }
+}
+
+func setTokenAuth(req *http.Request, token []string) {
+ if req.Header.Get("Authorization") == "" { // Don't override
+ req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+ }
+}
diff --git a/registry/session_v2.go b/registry/session_v2.go
new file mode 100644
index 00000000..c0bc19b3
--- /dev/null
+++ b/registry/session_v2.go
@@ -0,0 +1,390 @@
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/utils"
+ "github.com/gorilla/mux"
+)
+
+func newV2RegistryRouter() *mux.Router {
+ router := mux.NewRouter()
+
+ v2Router := router.PathPrefix("/v2/").Subrouter()
+
+ // Version Info
+ v2Router.Path("/version").Name("version")
+
+ // Image Manifests
+ v2Router.Path("/manifest/{imagename:[a-z0-9-._/]+}/{tagname:[a-zA-Z0-9-._]+}").Name("manifests")
+
+ // List Image Tags
+ v2Router.Path("/tags/{imagename:[a-z0-9-._/]+}").Name("tags")
+
+ // Download a blob
+ v2Router.Path("/blob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}/{sum:[a-fA-F0-9]{4,}}").Name("downloadBlob")
+
+ // Upload a blob
+ v2Router.Path("/blob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}").Name("uploadBlob")
+
+ // Mounting a blob in an image
+ v2Router.Path("/mountblob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}/{sum:[a-fA-F0-9]{4,}}").Name("mountBlob")
+
+ return router
+}
+
+// APIVersion2 /v2/
+var v2HTTPRoutes = newV2RegistryRouter()
+
+func getV2URL(e *Endpoint, routeName string, vars map[string]string) (*url.URL, error) {
+ route := v2HTTPRoutes.Get(routeName)
+ if route == nil {
+ return nil, fmt.Errorf("unknown registry v2 route name: %q", routeName)
+ }
+
+ varReplace := make([]string, 0, len(vars)*2)
+ for key, val := range vars {
+ varReplace = append(varReplace, key, val)
+ }
+
+ routePath, err := route.URLPath(varReplace...)
+ if err != nil { + return nil, fmt.Errorf("unable to make registry route %q with vars %v: %s", routeName, vars, err) + } + u, err := url.Parse(REGISTRYSERVER) + if err != nil { + return nil, fmt.Errorf("invalid registry url: %s", err) + } + + return &url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: routePath.Path, + }, nil +} + +// V2 Provenance POC + +func (r *Session) GetV2Version(token []string) (*RegistryInfo, error) { + routeURL, err := getV2URL(r.indexEndpoint, "version", nil) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d fetching Version", res.StatusCode), res) + } + + decoder := json.NewDecoder(res.Body) + versionInfo := new(RegistryInfo) + + err = decoder.Decode(versionInfo) + if err != nil { + return nil, fmt.Errorf("unable to decode GetV2Version JSON response: %s", err) + } + + return versionInfo, nil +} + +// +// 1) Check if TarSum of each layer exists /v2/ +// 1.a) if 200, continue +// 1.b) if 300, then push the +// 1.c) if anything else, err +// 2) PUT the created/signed manifest +// +func (r *Session) GetV2ImageManifest(imageName, tagName string, token []string) ([]byte, error) { + vars := map[string]string{ + "imagename": imageName, + "tagname": tagName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "manifests", vars) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } else if res.StatusCode == 404 { + return nil, ErrDoesNotExist + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) + } + + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("Error while reading the http response: %s", err) + } + return buf, nil +} + +// - Succeeded to mount for this image scope +// - Failed with no error (So continue to Push the Blob) +// - Failed with error +func (r *Session) PostV2ImageMountBlob(imageName, sumType, sum string, token []string) (bool, error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + "sum": sum, + } + + routeURL, err := getV2URL(r.indexEndpoint, "mountBlob", vars) + if err != nil { + return false, err + } + + method := "POST" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return false, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return false, err + } + res.Body.Close() // close early, since we're not needing a body on this call .. yet? 
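+ // Caller sketch (assumed usage, not defined in this file; name, sumType,
+ // sum, token and layerReader are hypothetical): try the mount first and
+ // fall back to uploading the blob only when the mount reports a miss:
+ //
+ // mounted, err := r.PostV2ImageMountBlob(name, sumType, sum, token)
+ // if err != nil {
+ // return err
+ // }
+ // if !mounted {
+ // _, err = r.PutV2ImageBlob(name, sumType, layerReader, token)
+ // }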
+ switch res.StatusCode {
+ case 200:
+ // return something indicating no push needed
+ return true, nil
+ case 300:
+ // return something indicating blob push needed
+ return false, nil
+ }
+ return false, fmt.Errorf("Failed to mount %q - %s:%s : %d", imageName, sumType, sum, res.StatusCode)
+}
+
+func (r *Session) GetV2ImageBlob(imageName, sumType, sum string, blobWrtr io.Writer, token []string) error {
+ vars := map[string]string{
+ "imagename": imageName,
+ "sumtype": sumType,
+ "sum": sum,
+ }
+
+ routeURL, err := getV2URL(r.indexEndpoint, "downloadBlob", vars)
+ if err != nil {
+ return err
+ }
+
+ method := "GET"
+ log.Debugf("[registry] Calling %q %s", method, routeURL.String())
+ req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil)
+ if err != nil {
+ return err
+ }
+ setTokenAuth(req, token)
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ if res.StatusCode == 401 {
+ return errLoginRequired
+ }
+ return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
+ }
+
+ _, err = io.Copy(blobWrtr, res.Body)
+ return err
+}
+
+func (r *Session) GetV2ImageBlobReader(imageName, sumType, sum string, token []string) (io.ReadCloser, int64, error) {
+ vars := map[string]string{
+ "imagename": imageName,
+ "sumtype": sumType,
+ "sum": sum,
+ }
+
+ routeURL, err := getV2URL(r.indexEndpoint, "downloadBlob", vars)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ method := "GET"
+ log.Debugf("[registry] Calling %q %s", method, routeURL.String())
+ req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ setTokenAuth(req, token)
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != 200 {
+ if res.StatusCode == 401 {
+ return nil, 0, errLoginRequired
+ }
+ return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
+ }
+ lenStr := res.Header.Get("Content-Length")
+ l, err := strconv.ParseInt(lenStr, 10, 64)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return res.Body, l, err
+}
+
+// Push the image to the server for storage.
+// 'blobRdr' is an uncompressed reader of the blob to be pushed.
+// The server will generate its own checksum calculation.
+func (r *Session) PutV2ImageBlob(imageName, sumType string, blobRdr io.Reader, token []string) (serverChecksum string, err error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + } + + routeURL, err := getV2URL(r.indexEndpoint, "uploadBlob", vars) + if err != nil { + return "", err + } + + method := "PUT" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), blobRdr) + if err != nil { + return "", err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return "", err + } + defer res.Body.Close() + if res.StatusCode != 201 { + if res.StatusCode == 401 { + return "", errLoginRequired + } + return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob", res.StatusCode, imageName), res) + } + + type sumReturn struct { + Checksum string `json:"checksum"` + } + + decoder := json.NewDecoder(res.Body) + var sumInfo sumReturn + + err = decoder.Decode(&sumInfo) + if err != nil { + return "", fmt.Errorf("unable to decode PutV2ImageBlob JSON response: %s", err) + } + + // XXX this is a json struct from the registry, with its checksum + return sumInfo.Checksum, nil +} + +// Finally Push the (signed) manifest of the blobs we've just pushed +func (r *Session) PutV2ImageManifest(imageName, tagName string, manifestRdr io.Reader, token []string) error { + vars := map[string]string{ + "imagename": imageName, + "tagname": tagName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "manifests", vars) + if err != nil { + return err + } + + method := "PUT" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), manifestRdr) + if err != nil { + return err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 201 { + if res.StatusCode == 401 { + return errLoginRequired + } + return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) + } + + return nil +} + +// Given a repository name, returns a json array of string tags +func (r *Session) GetV2RemoteTags(imageName string, token []string) ([]string, error) { + vars := map[string]string{ + "imagename": imageName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "tags", vars) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } else if res.StatusCode == 404 { + return nil, ErrDoesNotExist + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res) + } + + decoder := json.NewDecoder(res.Body) + var tags []string + err = decoder.Decode(&tags) + if err != nil { + return nil, fmt.Errorf("Error while decoding the http response: %s", err) + } + return tags, nil +} diff --git a/registry/types.go b/registry/types.go new file mode 100644 index 00000000..3b429f19 --- /dev/null +++ b/registry/types.go @@ -0,0 +1,67 @@ +package registry + +type SearchResult struct { + StarCount int 
`json:"star_count"` + IsOfficial bool `json:"is_official"` + Name string `json:"name"` + IsTrusted bool `json:"is_trusted"` + Description string `json:"description"` +} + +type SearchResults struct { + Query string `json:"query"` + NumResults int `json:"num_results"` + Results []SearchResult `json:"results"` +} + +type RepositoryData struct { + ImgList map[string]*ImgData + Endpoints []string + Tokens []string +} + +type ImgData struct { + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +type RegistryInfo struct { + Version string `json:"version"` + Standalone bool `json:"standalone"` +} + +type FSLayer struct { + BlobSum string `json:"blobSum"` +} + +type ManifestHistory struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type ManifestData struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []*FSLayer `json:"fsLayers"` + History []*ManifestHistory `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +var DefaultAPIVersion APIVersion = APIVersion1 +var apiVersions = map[APIVersion]string{ + 1: "v1", + 2: "v2", +} + +const ( + APIVersion1 = iota + 1 + APIVersion2 +) diff --git a/runconfig/compare.go b/runconfig/compare.go new file mode 100644 index 00000000..5c1bf465 --- /dev/null +++ b/runconfig/compare.go @@ -0,0 +1,60 @@ +package runconfig + +// Compare two Config struct. Do not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func Compare(a, b *Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.Memory != b.Memory || + a.MemorySwap != b.MemorySwap || + a.CpuShares != b.CpuShares || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.PortSpecs) != len(b.PortSpecs) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for i := 0; i < len(a.PortSpecs); i++ { + if a.PortSpecs[i] != b.PortSpecs[i] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/runconfig/config.go b/runconfig/config.go new file mode 100644 index 00000000..c00110bf --- /dev/null +++ b/runconfig/config.go @@ -0,0 +1,71 @@ +package runconfig + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" +) + +// Note: the Config structure should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. 
+type Config struct { + Hostname string + Domainname string + User string + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + Cpuset string // Cpuset 0-2, 0,1 + AttachStdin bool + AttachStdout bool + AttachStderr bool + PortSpecs []string // Deprecated - Can be in the format of 8080/tcp + ExposedPorts map[nat.Port]struct{} + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string + Cmd []string + Image string // Name of the image as it was passed by the operator (eg. could be symbolic) + Volumes map[string]struct{} + WorkingDir string + Entrypoint []string + NetworkDisabled bool + OnBuild []string +} + +func ContainerConfigFromJob(job *engine.Job) *Config { + config := &Config{ + Hostname: job.Getenv("Hostname"), + Domainname: job.Getenv("Domainname"), + User: job.Getenv("User"), + Memory: job.GetenvInt64("Memory"), + MemorySwap: job.GetenvInt64("MemorySwap"), + CpuShares: job.GetenvInt64("CpuShares"), + Cpuset: job.Getenv("Cpuset"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStdout: job.GetenvBool("AttachStdout"), + AttachStderr: job.GetenvBool("AttachStderr"), + Tty: job.GetenvBool("Tty"), + OpenStdin: job.GetenvBool("OpenStdin"), + StdinOnce: job.GetenvBool("StdinOnce"), + Image: job.Getenv("Image"), + WorkingDir: job.Getenv("WorkingDir"), + NetworkDisabled: job.GetenvBool("NetworkDisabled"), + } + job.GetenvJson("ExposedPorts", &config.ExposedPorts) + job.GetenvJson("Volumes", &config.Volumes) + if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { + config.PortSpecs = PortSpecs + } + if Env := job.GetenvList("Env"); Env != nil { + config.Env = Env + } + if Cmd := job.GetenvList("Cmd"); Cmd != nil { + config.Cmd = Cmd + } + if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { + config.Entrypoint = Entrypoint + } + return config +} diff --git a/runconfig/config_test.go b/runconfig/config_test.go new file mode 100644 index 00000000..d94ec4ec --- /dev/null +++ b/runconfig/config_test.go @@ -0,0 +1,264 @@ +package runconfig + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/docker/nat" +) + +func parse(t *testing.T, args string) (*Config, *HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " "), nil) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*Config, *HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. 
Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } + + if _, _, err := parse(t, "--link a"); err == nil { + t.Fatalf("Error parsing links. `--link a` should be an error but is not") + } + if _, _, err := parse(t, "--link"); err == nil { + t.Fatalf("Error parsing links. `--link` should be an error but is not") + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. 
Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) + } else if _, exists := config.Volumes["/var"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) + } else if len(config.Volumes) != 0 { + t.Fatalf("Error parsing volume flags, without volume, no volume should be present. 
Received %v", config.Volumes) + } + + if _, _, err := parse(t, "-v /"); err == nil { + t.Fatalf("Expected error, but got none") + } + + if _, _, err := parse(t, "-v /:/"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + } + if _, _, err := parse(t, "-v"); err == nil { + t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:ro"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + } + if _, _, err := parse(t, "-v :"); err == nil { + t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + } + if _, _, err := parse(t, "-v ::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + } +} + +func TestCompare(t *testing.T) { + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + config1 := Config{ + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, + } + config3 := Config{ + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, + } + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + config5 := Config{ + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes2, + } + if Compare(&config1, &config3) { + t.Fatalf("Compare should return false, PortSpecs are different") + } + if Compare(&config1, &config5) { + t.Fatalf("Compare should return false, Volumes are different") + } + if !Compare(&config1, &config1) { + t.Fatalf("Compare should return true") + } +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + configImage := &Config{ + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &Config{ + PortSpecs: []string{"3333:2222", "3333:3333"}, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := Merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v 
!= "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &Config{ + ExposedPorts: ports, + } + + if err := Merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs) + } + } + +} diff --git a/runconfig/exec.go b/runconfig/exec.go new file mode 100644 index 00000000..07de3e43 --- /dev/null +++ b/runconfig/exec.go @@ -0,0 +1,75 @@ +package runconfig + +import ( + "github.com/docker/docker/engine" + flag "github.com/docker/docker/pkg/mflag" +) + +type ExecConfig struct { + User string + Privileged bool + Tty bool + Container string + AttachStdin bool + AttachStderr bool + AttachStdout bool + Detach bool + Cmd []string +} + +func ExecConfigFromJob(job *engine.Job) *ExecConfig { + execConfig := &ExecConfig{ + User: job.Getenv("User"), + Privileged: job.GetenvBool("Privileged"), + Tty: job.GetenvBool("Tty"), + Container: job.Getenv("Container"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStderr: job.GetenvBool("AttachStderr"), + AttachStdout: job.GetenvBool("AttachStdout"), + } + if cmd := job.GetenvList("Cmd"); cmd != nil { + execConfig.Cmd = cmd + } + + return execConfig +} + +func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + execCmd []string + container string + ) + if err := cmd.Parse(args); err != nil { + return nil, err + } + parsedArgs := cmd.Args() + if len(parsedArgs) > 1 { + container = cmd.Arg(0) + execCmd = parsedArgs[1:] + } + + execConfig := &ExecConfig{ + // TODO(vishh): Expose '-u' flag once it is supported. + User: "", + // TODO(vishh): Expose '-p' flag once it is supported. 
+ Privileged: false,
+ Tty: *flTty,
+ Cmd: execCmd,
+ Container: container,
+ Detach: *flDetach,
+ }
+
+ // If -d is not set, attach to everything by default
+ if !*flDetach {
+ execConfig.AttachStdout = true
+ execConfig.AttachStderr = true
+ if *flStdin {
+ execConfig.AttachStdin = true
+ }
+ }
+
+ return execConfig, nil
+}
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
new file mode 100644
index 00000000..ae75434d
--- /dev/null
+++ b/runconfig/hostconfig.go
@@ -0,0 +1,121 @@
+package runconfig
+
+import (
+ "strings"
+
+ "github.com/docker/docker/engine"
+ "github.com/docker/docker/nat"
+ "github.com/docker/docker/utils"
+)
+
+type NetworkMode string
+
+// IsPrivate indicates whether the container uses its own private network stack
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer() || n.IsNone())
+}
+
+func (n NetworkMode) IsHost() bool {
+ return n == "host"
+}
+
+func (n NetworkMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+type DeviceMapping struct {
+ PathOnHost string
+ PathInContainer string
+ CgroupPermissions string
+}
+
+type RestartPolicy struct {
+ Name string
+ MaximumRetryCount int
+}
+
+type HostConfig struct {
+ Binds []string
+ ContainerIDFile string
+ LxcConf []utils.KeyValuePair
+ Privileged bool
+ PortBindings nat.PortMap
+ Links []string
+ PublishAllPorts bool
+ Dns []string
+ DnsSearch []string
+ ExtraHosts []string
+ VolumesFrom []string
+ Devices []DeviceMapping
+ NetworkMode NetworkMode
+ CapAdd []string
+ CapDrop []string
+ RestartPolicy RestartPolicy
+ SecurityOpt []string
+}
+
+// This is used by the create command when you want to set both the
+// Config and the HostConfig in the same call
+type ConfigAndHostConfig struct {
+ Config
+ HostConfig HostConfig
+}
+
+func MergeConfigs(config *Config, hostConfig *HostConfig) *ConfigAndHostConfig {
+ return &ConfigAndHostConfig{
+ *config,
+ *hostConfig,
+ }
+}
+
+func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
+ if job.EnvExists("HostConfig") {
+ hostConfig := HostConfig{}
+ job.GetenvJson("HostConfig", &hostConfig)
+ return &hostConfig
+ }
+
+ hostConfig := &HostConfig{
+ ContainerIDFile: job.Getenv("ContainerIDFile"),
+ Privileged: job.GetenvBool("Privileged"),
+ PublishAllPorts: job.GetenvBool("PublishAllPorts"),
+ NetworkMode: NetworkMode(job.Getenv("NetworkMode")),
+ }
+
+ job.GetenvJson("LxcConf", &hostConfig.LxcConf)
+ job.GetenvJson("PortBindings", &hostConfig.PortBindings)
+ job.GetenvJson("Devices", &hostConfig.Devices)
+ job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
+ hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
+ if Binds := job.GetenvList("Binds"); Binds != nil {
+ hostConfig.Binds = Binds
+ }
+ if Links := job.GetenvList("Links"); Links != nil {
+ hostConfig.Links = Links
+ }
+ if Dns := job.GetenvList("Dns"); Dns != nil {
+ hostConfig.Dns = Dns
+ }
+ if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
+ hostConfig.DnsSearch = DnsSearch
+ }
+ if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
+ hostConfig.ExtraHosts = ExtraHosts
+ }
+ if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
+ hostConfig.VolumesFrom = VolumesFrom
+ }
+ if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
+ hostConfig.CapAdd = CapAdd
+ }
+ if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
+ hostConfig.CapDrop = CapDrop
+ }
+
+ return hostConfig
+}
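As a quick illustration of the NetworkMode predicates above, a minimal standalone sketch (assuming the patched tree is importable as github.com/docker/docker/runconfig; the mode strings are the ones the helpers above test for):

	package main

	import (
		"fmt"

		"github.com/docker/docker/runconfig"
	)

	func main() {
		// Only values that are not "host", "none" or "container:<id>" count as private.
		for _, m := range []runconfig.NetworkMode{"bridge", "host", "none", "container:db"} {
			fmt.Printf("%-12s private=%v container=%v\n", m, m.IsPrivate(), m.IsContainer())
		}
	}

With these definitions, "bridge" (or any other unrecognized value) is the only mode above that reports IsPrivate() == true.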
diff --git a/runconfig/merge.go b/runconfig/merge.go
new file mode 100644
index 00000000..64950bf6
--- /dev/null
+++ b/runconfig/merge.go
@@ -0,0 +1,107 @@
+package runconfig
+
+import (
+ "strings"
+
+ "github.com/docker/docker/nat"
+ "github.com/docker/docker/pkg/log"
+)
+
+func Merge(userConf, imageConf *Config) error {
+ if userConf.User == "" {
+    userConf.User = imageConf.User
+ }
+ if userConf.Memory == 0 {
+    userConf.Memory = imageConf.Memory
+ }
+ if userConf.MemorySwap == 0 {
+    userConf.MemorySwap = imageConf.MemorySwap
+ }
+ if userConf.CpuShares == 0 {
+    userConf.CpuShares = imageConf.CpuShares
+ }
+ if len(userConf.ExposedPorts) == 0 {
+    userConf.ExposedPorts = imageConf.ExposedPorts
+ } else if imageConf.ExposedPorts != nil {
+    if userConf.ExposedPorts == nil {
+       userConf.ExposedPorts = make(nat.PortSet)
+    }
+    for port := range imageConf.ExposedPorts {
+       if _, exists := userConf.ExposedPorts[port]; !exists {
+          userConf.ExposedPorts[port] = struct{}{}
+       }
+    }
+ }
+
+ if len(userConf.PortSpecs) > 0 {
+    if userConf.ExposedPorts == nil {
+       userConf.ExposedPorts = make(nat.PortSet)
+    }
+    ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs)
+    if err != nil {
+       return err
+    }
+    for port := range ports {
+       if _, exists := userConf.ExposedPorts[port]; !exists {
+          userConf.ExposedPorts[port] = struct{}{}
+       }
+    }
+    userConf.PortSpecs = nil
+ }
+ if len(imageConf.PortSpecs) > 0 {
+    // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia.
+    log.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", "))
+    if userConf.ExposedPorts == nil {
+       userConf.ExposedPorts = make(nat.PortSet)
+    }
+
+    ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs)
+    if err != nil {
+       return err
+    }
+    for port := range ports {
+       if _, exists := userConf.ExposedPorts[port]; !exists {
+          userConf.ExposedPorts[port] = struct{}{}
+       }
+    }
+ }
+
+ if len(userConf.Env) == 0 {
+    userConf.Env = imageConf.Env
+ } else {
+    for _, imageEnv := range imageConf.Env {
+       found := false
+       imageEnvKey := strings.Split(imageEnv, "=")[0]
+       for _, userEnv := range userConf.Env {
+          userEnvKey := strings.Split(userEnv, "=")[0]
+          if imageEnvKey == userEnvKey {
+             found = true
+          }
+       }
+       if !found {
+          userConf.Env = append(userConf.Env, imageEnv)
+       }
+    }
+ }
+
+ if len(userConf.Entrypoint) == 0 {
+    if len(userConf.Cmd) == 0 {
+       userConf.Cmd = imageConf.Cmd
+    }
+
+    if userConf.Entrypoint == nil {
+       userConf.Entrypoint = imageConf.Entrypoint
+    }
+ }
+ if userConf.WorkingDir == "" {
+    userConf.WorkingDir = imageConf.WorkingDir
+ }
+ if len(userConf.Volumes) == 0 {
+    userConf.Volumes = imageConf.Volumes
+ } else {
+    for k, v := range imageConf.Volumes {
+       userConf.Volumes[k] = v
+    }
+ }
+ return nil
+}
diff --git a/runconfig/parse.go b/runconfig/parse.go
new file mode 100644
index 00000000..43976f60
--- /dev/null
+++ b/runconfig/parse.go
@@ -0,0 +1,406 @@
+package runconfig
+
+import (
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/nat"
+ "github.com/docker/docker/opts"
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/parsers"
+ "github.com/docker/docker/pkg/sysinfo"
+ "github.com/docker/docker/pkg/units"
+ "github.com/docker/docker/utils"
+)
+
+var (
+ ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
+ ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links.
This would result in undefined behavior.") + ErrConflictContainerNetworkAndDns = fmt.Errorf("Conflicting options: --net=container can't be used with --dns. This configuration is invalid.") + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)") + ErrConflictHostNetworkAndDns = fmt.Errorf("Conflicting options: --net=host can't be used with --dns. This configuration is invalid.") + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.") +) + +func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + var ( + // FIXME: use utils.ListOpts for attach and volumes? + flAttach = opts.NewListOpts(opts.ValidateAttach) + flVolumes = opts.NewListOpts(opts.ValidatePath) + flLinks = opts.NewListOpts(opts.ValidateLink) + flEnv = opts.NewListOpts(opts.ValidateEnv) + flDevices = opts.NewListOpts(opts.ValidatePath) + + flPublish = opts.NewListOpts(nil) + flExpose = opts.NewListOpts(nil) + flDns = opts.NewListOpts(opts.ValidateIPAddress) + flDnsSearch = opts.NewListOpts(opts.ValidateDnsSearch) + flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) + flVolumesFrom = opts.NewListOpts(nil) + flLxcOpts = opts.NewListOpts(nil) + flEnvFile = opts.NewListOpts(nil) + flCapAdd = opts.NewListOpts(nil) + flCapDrop = opts.NewListOpts(nil) + flSecurityOpt = opts.NewListOpts(nil) + + flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") + flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") + flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") + flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") + flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") + flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: , where unit = b, k, m or g)") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") + flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") + flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the container. 
Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.") + flRestartPolicy = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure[:max-retry], always)") + ) + + cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR.") + cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)") + cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container in the form of name:alias") + cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)") + + cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") + cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of environment variables") + + cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) + cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") + cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") + cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") + cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + + cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") + cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") + cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") + + if err := cmd.Parse(args); err != nil { + return nil, nil, cmd, err + } + + // Check if the kernel supports memory limit cgroup. 
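+ // If it does not, the requested -m/--memory value is simply cleared
+ // below so the container can still be created without a memory limit,
+ // rather than the parse failing outright.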
+ if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { + *flMemoryString = "" + } + + // Validate input params + if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { + return nil, nil, cmd, ErrInvalidWorkingDirectory + } + + var ( + attachStdin = flAttach.Get("stdin") + attachStdout = flAttach.Get("stdout") + attachStderr = flAttach.Get("stderr") + ) + + if *flNetMode != "bridge" && *flNetMode != "none" && *flHostname != "" { + return nil, nil, cmd, ErrConflictNetworkHostname + } + + if *flNetMode == "host" && flLinks.Len() > 0 { + return nil, nil, cmd, ErrConflictHostNetworkAndLinks + } + + if *flNetMode == "container" && flLinks.Len() > 0 { + return nil, nil, cmd, ErrConflictContainerNetworkAndLinks + } + + if *flNetMode == "host" && flDns.Len() > 0 { + return nil, nil, cmd, ErrConflictHostNetworkAndDns + } + + if *flNetMode == "container" && flDns.Len() > 0 { + return nil, nil, cmd, ErrConflictContainerNetworkAndDns + } + + // If neither -d or -a are set, attach to everything by default + if flAttach.Len() == 0 { + attachStdout = true + attachStderr = true + if *flStdin { + attachStdin = true + } + } + + var flMemory int64 + if *flMemoryString != "" { + parsedMemory, err := units.RAMInBytes(*flMemoryString) + if err != nil { + return nil, nil, cmd, err + } + flMemory = parsedMemory + } + + var binds []string + // add any bind targets to the list of container volumes + for bind := range flVolumes.GetMap() { + if arr := strings.Split(bind, ":"); len(arr) > 1 { + if arr[1] == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid bind mount: destination can't be '/'") + } + // after creating the bind mount we want to delete it from the flVolumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + flVolumes.Delete(bind) + } else if bind == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") + } + } + + var ( + parsedArgs = cmd.Args() + runCmd []string + entrypoint []string + image string + ) + if len(parsedArgs) >= 1 { + image = cmd.Arg(0) + } + if len(parsedArgs) > 1 { + runCmd = parsedArgs[1:] + } + if *flEntrypoint != "" { + entrypoint = []string{*flEntrypoint} + } + + lxcConf, err := parseKeyValueOpts(flLxcOpts) + if err != nil { + return nil, nil, cmd, err + } + + var ( + domainname string + hostname = *flHostname + parts = strings.SplitN(hostname, ".", 2) + ) + if len(parts) > 1 { + hostname = parts[0] + domainname = parts[1] + } + + ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) + if err != nil { + return nil, nil, cmd, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range flExpose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) + } + p := nat.NewPort(nat.SplitProtoPort(e)) + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + + // parse device mappings + deviceMappings := []DeviceMapping{} + for _, device := range flDevices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, cmd, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables := []string{} + for _, ef := range flEnvFile.GetAll() { + parsedVars, err := opts.ParseEnvFile(ef) + if err != nil { + return nil, nil, cmd, err + } + envVariables = append(envVariables, parsedVars...) 
+ } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, flEnv.GetAll()...) + + netMode, err := parseNetMode(*flNetMode) + if err != nil { + return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) + } + + restartPolicy, err := parseRestartPolicy(*flRestartPolicy) + if err != nil { + return nil, nil, cmd, err + } + + config := &Config{ + Hostname: hostname, + Domainname: domainname, + PortSpecs: nil, // Deprecated + ExposedPorts: ports, + User: *flUser, + Tty: *flTty, + NetworkDisabled: !*flNetwork, + OpenStdin: *flStdin, + Memory: flMemory, + CpuShares: *flCpuShares, + Cpuset: *flCpuset, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: image, + Volumes: flVolumes.GetMap(), + Entrypoint: entrypoint, + WorkingDir: *flWorkingDir, + } + + hostConfig := &HostConfig{ + Binds: binds, + ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), + ExtraHosts: flExtraHosts.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: netMode, + Devices: deviceMappings, + CapAdd: flCapAdd.GetAll(), + CapDrop: flCapDrop.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: flSecurityOpt.GetAll(), + } + + if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { + //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") + config.MemorySwap = -1 + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + return config, hostConfig, cmd, nil +} + +// parseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func parseRestartPolicy(policy string) (RestartPolicy, error) { + p := RestartPolicy{} + + if policy == "" { + return p, nil + } + + var ( + parts = strings.Split(policy, ":") + name = parts[0] + ) + + switch name { + case "always": + p.Name = name + + if len(parts) == 2 { + return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"") + } + case "no": + // do nothing + case "on-failure": + p.Name = name + + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, err + } + + p.MaximumRetryCount = count + } + default: + return p, fmt.Errorf("invalid restart policy %s", name) + } + + return p, nil +} + +// options will come in the format of name.key=value or name.option +func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { + out := make(map[string][]string, len(opts.GetAll())) + for _, o := range opts.GetAll() { + parts := strings.SplitN(o, ".", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("invalid opt format %s", o) + } else if strings.TrimSpace(parts[0]) == "" { + return nil, fmt.Errorf("key cannot be empty %s", o) + } + values, exists := out[parts[0]] + if !exists { + values = []string{} + } + out[parts[0]] = append(values, parts[1]) + } + return out, nil +} + +func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { + out := make([]utils.KeyValuePair, opts.Len()) + for i, o := range opts.GetAll() { + k, v, err := parsers.ParseKeyValueOpt(o) + if err != nil { + return nil, err + } + out[i] = utils.KeyValuePair{Key: k, Value: v} + } + return out, nil +} + +func parseNetMode(netMode string) (NetworkMode, 
error) { + parts := strings.Split(netMode, ":") + switch mode := parts[0]; mode { + case "bridge", "none", "host": + case "container": + if len(parts) < 2 || parts[1] == "" { + return "", fmt.Errorf("invalid container format container:") + } + default: + return "", fmt.Errorf("invalid --net: %s", netMode) + } + return NetworkMode(netMode), nil +} + +func ParseDevice(device string) (DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + dst = arr[1] + fallthrough + case 1: + src = arr[0] + default: + return DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go new file mode 100644 index 00000000..e807180d --- /dev/null +++ b/runconfig/parse_test.go @@ -0,0 +1,60 @@ +package runconfig + +import ( + "io/ioutil" + "testing" + + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" +) + +func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return Parse(cmd, args, sysInfo) +} + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parsers.ParseKeyValueOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } +} + +func TestNetHostname(t *testing.T) { + if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } +} diff --git a/trust/service.go b/trust/service.go new file mode 100644 index 00000000..c056ac71 --- /dev/null +++ b/trust/service.go @@ -0,0 +1,74 @@ +package trust + +import ( + "fmt" + "time" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/libtrust" +) + +func (t *TrustStore) Install(eng *engine.Engine) error { + for name, handler := range map[string]engine.Handler{ + "trust_key_check": t.CmdCheckKey, + "trust_update_base": t.CmdUpdateBase, + } { + if err := eng.Register(name, handler); err != nil { + return fmt.Errorf("Could not register %q: %v", name, err) + } + } + return nil +} + +func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + 
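+ // (trust_key_check takes exactly one argument: the namespace to check)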
return job.Errorf("Usage: %s NAMESPACE", job.Name) + } + var ( + namespace = job.Args[0] + keyBytes = job.Getenv("PublicKey") + ) + + if keyBytes == "" { + return job.Errorf("Missing PublicKey") + } + pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes)) + if err != nil { + return job.Errorf("Error unmarshalling public key: %s", err) + } + + permission := uint16(job.GetenvInt("Permission")) + if permission == 0 { + permission = 0x03 + } + + t.RLock() + defer t.RUnlock() + if t.graph == nil { + job.Stdout.Write([]byte("no graph")) + return engine.StatusOK + } + + // Check if any expired grants + verified, err := t.graph.Verify(pk, namespace, permission) + if err != nil { + return job.Errorf("Error verifying key to namespace: %s", namespace) + } + if !verified { + log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID()) + job.Stdout.Write([]byte("not verified")) + } else if t.expiration.Before(time.Now()) { + job.Stdout.Write([]byte("expired")) + } else { + job.Stdout.Write([]byte("verified")) + } + + return engine.StatusOK +} + +func (t *TrustStore) CmdUpdateBase(job *engine.Job) engine.Status { + t.fetch() + + return engine.StatusOK +} diff --git a/trust/trusts.go b/trust/trusts.go new file mode 100644 index 00000000..a3c0f5f5 --- /dev/null +++ b/trust/trusts.go @@ -0,0 +1,199 @@ +package trust + +import ( + "crypto/x509" + "errors" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "sync" + "time" + + "github.com/docker/docker/pkg/log" + "github.com/docker/libtrust/trustgraph" +) + +type TrustStore struct { + path string + caPool *x509.CertPool + graph trustgraph.TrustGraph + expiration time.Time + fetcher *time.Timer + fetchTime time.Duration + autofetch bool + httpClient *http.Client + baseEndpoints map[string]*url.URL + + sync.RWMutex +} + +// defaultFetchtime represents the starting duration to wait between +// fetching sections of the graph. Unsuccessful fetches should +// increase time between fetching. 
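+// Once fetchTime has been reset to this 45s default, the growth rule in
+// fetch() (fetchTime = 1.5 * (fetchTime + 1s), capped at 10 *
+// defaultFetchtime) makes consecutive failed fetches wait roughly
+// 69s, 105s, 159s, ... up to a 450s ceiling.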
+const defaultFetchtime = 45 * time.Second + +var baseEndpoints = map[string]string{"official": "https://dvjy3tqbc323p.cloudfront.net/trust/official.json"} + +func NewTrustStore(path string) (*TrustStore, error) { + abspath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + // Create base graph url map + endpoints := map[string]*url.URL{} + for name, endpoint := range baseEndpoints { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + endpoints[name] = u + } + + // Load grant files + t := &TrustStore{ + path: abspath, + caPool: nil, + httpClient: &http.Client{}, + fetchTime: time.Millisecond, + baseEndpoints: endpoints, + } + + err = t.reload() + if err != nil { + return nil, err + } + + return t, nil +} + +func (t *TrustStore) reload() error { + t.Lock() + defer t.Unlock() + + matches, err := filepath.Glob(filepath.Join(t.path, "*.json")) + if err != nil { + return err + } + statements := make([]*trustgraph.Statement, len(matches)) + for i, match := range matches { + f, err := os.Open(match) + if err != nil { + return err + } + statements[i], err = trustgraph.LoadStatement(f, nil) + if err != nil { + f.Close() + return err + } + f.Close() + } + if len(statements) == 0 { + if t.autofetch { + log.Debugf("No grants, fetching") + t.fetcher = time.AfterFunc(t.fetchTime, t.fetch) + } + return nil + } + + grants, expiration, err := trustgraph.CollapseStatements(statements, true) + if err != nil { + return err + } + + t.expiration = expiration + t.graph = trustgraph.NewMemoryGraph(grants) + log.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration) + + if t.autofetch { + nextFetch := expiration.Sub(time.Now()) + if nextFetch < 0 { + nextFetch = defaultFetchtime + } else { + nextFetch = time.Duration(0.8 * (float64)(nextFetch)) + } + t.fetcher = time.AfterFunc(nextFetch, t.fetch) + } + + return nil +} + +func (t *TrustStore) fetchBaseGraph(u *url.URL) (*trustgraph.Statement, error) { + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Body: nil, + Host: u.Host, + } + + resp, err := t.httpClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode == 404 { + return nil, errors.New("base graph does not exist") + } + + defer resp.Body.Close() + + return trustgraph.LoadStatement(resp.Body, t.caPool) +} + +// fetch retrieves updated base graphs. This function cannot error, it +// should only log errors +func (t *TrustStore) fetch() { + t.Lock() + defer t.Unlock() + + if t.autofetch && t.fetcher == nil { + // Do nothing ?? 
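+ // (autofetch is on but no timer is pending; apparently another fetch
+ // has already run and cleared t.fetcher, so there is nothing to do)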
+ return
+ }
+
+ fetchCount := 0
+ for bg, ep := range t.baseEndpoints {
+    statement, err := t.fetchBaseGraph(ep)
+    if err != nil {
+       log.Infof("Trust graph fetch failed: %s", err)
+       continue
+    }
+    b, err := statement.Bytes()
+    if err != nil {
+       log.Infof("Bad trust graph statement: %s", err)
+       continue
+    }
+    // TODO check if value differs
+    err = ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600)
+    if err != nil {
+       log.Infof("Error writing trust graph statement: %s", err)
+    }
+    fetchCount++
+ }
+ log.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now())
+
+ if fetchCount > 0 {
+    go func() {
+       err := t.reload()
+       if err != nil {
+          // TODO log
+          log.Infof("Reload of trust graph failed: %s", err)
+       }
+    }()
+    t.fetchTime = defaultFetchtime
+    t.fetcher = nil
+ } else if t.autofetch {
+    maxTime := 10 * defaultFetchtime
+    t.fetchTime = time.Duration(1.5 * (float64)(t.fetchTime+time.Second))
+    if t.fetchTime > maxTime {
+       t.fetchTime = maxTime
+    }
+    t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
+ }
+}
diff --git a/utils/daemon.go b/utils/daemon.go
new file mode 100644
index 00000000..871122ed
--- /dev/null
+++ b/utils/daemon.go
@@ -0,0 +1,36 @@
+package utils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strconv"
+)
+
+func CreatePidFile(pidfile string) error {
+ if pidString, err := ioutil.ReadFile(pidfile); err == nil {
+    pid, err := strconv.Atoi(string(pidString))
+    if err == nil {
+       if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
+          return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
+       }
+    }
+ }
+
+ file, err := os.Create(pidfile)
+ if err != nil {
+    return err
+ }
+
+ defer file.Close()
+
+ _, err = fmt.Fprintf(file, "%d", os.Getpid())
+ return err
+}
+
+func RemovePidFile(pidfile string) {
+ if err := os.Remove(pidfile); err != nil {
+    log.Printf("Error removing %s: %s", pidfile, err)
+ }
+}
diff --git a/utils/http.go b/utils/http.go
new file mode 100644
index 00000000..c877eefd
--- /dev/null
+++ b/utils/http.go
@@ -0,0 +1,164 @@
+package utils
+
+import (
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/docker/docker/pkg/log"
+)
+
+// VersionInfo is used to model entities which have a version.
+// It is basically a tuple of name and version.
+type VersionInfo interface {
+ Name() string
+ Version() string
+}
+
+func validVersion(version VersionInfo) bool {
+ const stopChars = " \t\r\n/"
+ name := version.Name()
+ vers := version.Version()
+ if len(name) == 0 || strings.ContainsAny(name, stopChars) {
+    return false
+ }
+ if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
+    return false
+ }
+ return true
+}
+
+// Convert versions to a string and append the string to the base string.
+//
+// Each VersionInfo will be converted to a string in the format of
+// "product/version", where "product" comes from the Name() method and
+// "version" from the Version() method. Several pieces of version
+// information are concatenated and separated by spaces.
+func appendVersions(base string, versions ...VersionInfo) string {
+ if len(versions) == 0 {
+    return base
+ }
+
+ verstrs := make([]string, 0, 1+len(versions))
+ if len(base) > 0 {
+    verstrs = append(verstrs, base)
+ }
+
+ for _, v := range versions {
+    if !validVersion(v) {
+       continue
+    }
+    verstrs = append(verstrs, v.Name()+"/"+v.Version())
+ }
+ return strings.Join(verstrs, " ")
+}
+
+// HTTPRequestDecorator is used to change an instance of
+// http.Request. It could be used to add more header fields,
+// change body, etc.
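+//
+// The concrete decorators below (HTTPUserAgentDecorator,
+// HTTPMetaHeadersDecorator and HTTPAuthDecorator) each implement the single
+// ChangeRequest method to rewrite one aspect of the outgoing request.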
+type HTTPRequestDecorator interface {
+ // ChangeRequest() changes the request accordingly.
+ // The changed request will be returned or err will be non-nil
+ // if an error occurs.
+ ChangeRequest(req *http.Request) (newReq *http.Request, err error)
+}
+
+// HTTPUserAgentDecorator appends the product/version to the user agent field
+// of a request.
+type HTTPUserAgentDecorator struct {
+ versions []VersionInfo
+}
+
+func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator {
+ return &HTTPUserAgentDecorator{
+    versions: versions,
+ }
+}
+
+func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
+ if req == nil {
+    return req, nil
+ }
+
+ userAgent := appendVersions(req.UserAgent(), h.versions...)
+ if len(userAgent) > 0 {
+    req.Header.Set("User-Agent", userAgent)
+ }
+ return req, nil
+}
+
+type HTTPMetaHeadersDecorator struct {
+ Headers map[string][]string
+}
+
+func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
+ if h.Headers == nil {
+    return req, nil
+ }
+ for k, v := range h.Headers {
+    req.Header[k] = v
+ }
+ return req, nil
+}
+
+type HTTPAuthDecorator struct {
+ login string
+ password string
+}
+
+func NewHTTPAuthDecorator(login, password string) HTTPRequestDecorator {
+ return &HTTPAuthDecorator{
+    login: login,
+    password: password,
+ }
+}
+
+func (self *HTTPAuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
+ req.SetBasicAuth(self.login, self.password)
+ return req, nil
+}
+
+// HTTPRequestFactory creates an HTTP request
+// and applies a list of decorators on the request.
+type HTTPRequestFactory struct {
+ decorators []HTTPRequestDecorator
+}
+
+func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory {
+ return &HTTPRequestFactory{
+    decorators: d,
+ }
+}
+
+func (self *HTTPRequestFactory) AddDecorator(d ...HTTPRequestDecorator) {
+ self.decorators = append(self.decorators, d...)
+}
+
+// NewRequest() creates a new *http.Request,
+// applies all decorators in the HTTPRequestFactory on the request,
+// then applies decorators provided by d on the request.
+func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) {
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+    return nil, err
+ }
+
+ // By default, a nil factory should work.
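+ // (callers may legitimately hold a nil *HTTPRequestFactory; it simply
+ // returns the request undecorated)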
+ if h == nil {
+    return req, nil
+ }
+ for _, dec := range h.decorators {
+    req, err = dec.ChangeRequest(req)
+    if err != nil {
+       return nil, err
+    }
+ }
+ for _, dec := range d {
+    req, err = dec.ChangeRequest(req)
+    if err != nil {
+       return nil, err
+    }
+ }
+ log.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
+ return req, err
+}
diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go
new file mode 100644
index 00000000..3752c997
--- /dev/null
+++ b/utils/jsonmessage.go
@@ -0,0 +1,169 @@
+package utils
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/term"
+ "github.com/docker/docker/pkg/timeutils"
+ "github.com/docker/docker/pkg/units"
+)
+
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+type JSONProgress struct {
+ terminalFd uintptr
+ Current int `json:"current,omitempty"`
+ Total int `json:"total,omitempty"`
+ Start int64 `json:"start,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+ var (
+    width = 200
+    pbBox string
+    numbersBox string
+    timeLeftBox string
+ )
+
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+    width = int(ws.Width)
+ }
+
+ if p.Current <= 0 && p.Total <= 0 {
+    return ""
+ }
+ current := units.HumanSize(int64(p.Current))
+ if p.Total <= 0 {
+    return fmt.Sprintf("%8v", current)
+ }
+ total := units.HumanSize(int64(p.Total))
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if width > 110 {
+    // this number can't be negative gh#7136
+    numSpaces := 0
+    if 50-percentage > 0 {
+       numSpaces = 50 - percentage
+    }
+    pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+    fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
+    perEntry := fromStart / time.Duration(p.Current)
+    left := time.Duration(p.Total-p.Current) * perEntry
+    left = (left / time.Second) * time.Second
+
+    if width > 50 {
+       timeLeftBox = " " + left.String()
+    }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+ ErrorMessage string `json:"error,omitempty"` //deprecated
+}
+
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+ if jm.Error != nil {
+    if jm.Error.Code == 401 {
+       return fmt.Errorf("Authentication is required.")
+    }
+    return jm.Error
+ }
+ var endl string
+ if isTerminal && jm.Stream == "" && jm.Progress != nil {
+    // <ESC>[2K = erase entire current line
+    fmt.Fprintf(out, "%c[2K\r", 27)
+    endl = "\r"
+ } else if jm.Progress != nil { //disable progressbar in non-terminal
+    return nil
+ }
+ if jm.Time != 0 {
+    fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+    fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+    fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil {
+    fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+    fmt.Fprintf(out, "%s %s%s", jm.Status,
jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+    fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+    fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error {
+ var (
+    dec = json.NewDecoder(in)
+    ids = make(map[string]int)
+    diff = 0
+ )
+ for {
+    var jm JSONMessage
+    if err := dec.Decode(&jm); err != nil {
+       if err == io.EOF {
+          break
+       }
+       return err
+    }
+
+    if jm.Progress != nil {
+       jm.Progress.terminalFd = terminalFd
+    }
+    if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+       line, ok := ids[jm.ID]
+       if !ok {
+          line = len(ids)
+          ids[jm.ID] = line
+          if isTerminal {
+             fmt.Fprintf(out, "\n")
+          }
+          diff = 0
+       } else {
+          diff = len(ids) - line
+       }
+       if jm.ID != "" && isTerminal {
+          // <ESC>[{diff}A = move cursor up diff rows
+          fmt.Fprintf(out, "%c[%dA", 27, diff)
+       }
+    }
+    err := jm.Display(out, isTerminal)
+    if jm.ID != "" && isTerminal {
+       // <ESC>[{diff}B = move cursor down diff rows
+       fmt.Fprintf(out, "%c[%dB", 27, diff)
+    }
+    if err != nil {
+       return err
+    }
+ }
+ return nil
+}
diff --git a/utils/jsonmessage_test.go b/utils/jsonmessage_test.go
new file mode 100644
index 00000000..0ce9492c
--- /dev/null
+++ b/utils/jsonmessage_test.go
@@ -0,0 +1,38 @@
+package utils
+
+import (
+ "testing"
+)
+
+func TestError(t *testing.T) {
+ je := JSONError{404, "Not found"}
+ if je.Error() != "Not found" {
+    t.Fatalf("Expected 'Not found' got '%s'", je.Error())
+ }
+}
+
+func TestProgress(t *testing.T) {
+ jp := JSONProgress{}
+ if jp.String() != "" {
+    t.Fatalf("Expected empty string, got '%s'", jp.String())
+ }
+
+ expected := "     1 B"
+ jp2 := JSONProgress{Current: 1}
+ if jp2.String() != expected {
+    t.Fatalf("Expected %q, got %q", expected, jp2.String())
+ }
+
+ expected = "[=========================>                         ]     50 B/100 B"
+ jp3 := JSONProgress{Current: 50, Total: 100}
+ if jp3.String() != expected {
+    t.Fatalf("Expected %q, got %q", expected, jp3.String())
+ }
+
+ // this number can't be negative gh#7136
+ expected = "[==============================================================>]     50 B/40 B"
+ jp4 := JSONProgress{Current: 50, Total: 40}
+ if jp4.String() != expected {
+    t.Fatalf("Expected %q, got %q", expected, jp4.String())
+ }
+}
diff --git a/utils/progressreader.go b/utils/progressreader.go
new file mode 100644
index 00000000..87eae8ba
--- /dev/null
+++ b/utils/progressreader.go
@@ -0,0 +1,55 @@
+package utils
+
+import (
+ "io"
+ "time"
+)
+
+// Reader with progress bar
+type progressReader struct {
+ reader io.ReadCloser // Stream to read from
+ output io.Writer // Where to send the progress bar
+ progress JSONProgress
+ lastUpdate int // How many bytes had been read at the last update
+ ID string
+ action string
+ sf *StreamFormatter
+ newLine bool
+}
+
+func (r *progressReader) Read(p []byte) (n int, err error) {
+ read, err := r.reader.Read(p)
+ r.progress.Current += read
+ updateEvery := 1024 * 512 //512kB
+ if r.progress.Total > 0 {
+    // Update progress for every 1% read if 1% < 512kB
+    if increment := int(0.01 * float64(r.progress.Total)); increment < updateEvery {
+       updateEvery = increment
+    }
+ }
+ if r.progress.Current-r.lastUpdate > updateEvery || err != nil {
+    r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress))
+    r.lastUpdate = r.progress.Current
+ }
+ // Send newline when complete
+ if r.newLine && err != nil && read == 0 {
+    r.output.Write(r.sf.FormatStatus("", ""))
+ }
+ return read, err
+}
+func (r *progressReader) Close() error {
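+ // Snap the progress bar to 100% and emit it once more before closing
+ // the underlying stream.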
r.progress.Current = r.progress.Total + r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress)) + return r.reader.Close() +} +func ProgressReader(r io.ReadCloser, size int, output io.Writer, sf *StreamFormatter, newline bool, ID, action string) *progressReader { + return &progressReader{ + reader: r, + output: NewWriteFlusher(output), + ID: ID, + action: action, + progress: JSONProgress{Total: size, Start: time.Now().UTC().Unix()}, + sf: sf, + newLine: newline, + } +} diff --git a/utils/random.go b/utils/random.go new file mode 100644 index 00000000..907f28ee --- /dev/null +++ b/utils/random.go @@ -0,0 +1,16 @@ +package utils + +import ( + "crypto/rand" + "encoding/hex" + "io" +) + +func RandomString() string { + id := make([]byte, 32) + + if _, err := io.ReadFull(rand.Reader, id); err != nil { + panic(err) // This shouldn't happen + } + return hex.EncodeToString(id) +} diff --git a/utils/streamformatter.go b/utils/streamformatter.go new file mode 100644 index 00000000..d0bc295b --- /dev/null +++ b/utils/streamformatter.go @@ -0,0 +1,112 @@ +package utils + +import ( + "encoding/json" + "fmt" + "io" +) + +type StreamFormatter struct { + json bool +} + +func NewStreamFormatter(json bool) *StreamFormatter { + return &StreamFormatter{json} +} + +const streamNewline = "\r\n" + +var streamNewlineBytes = []byte(streamNewline) + +func (sf *StreamFormatter) FormatStream(str string) []byte { + if sf.json { + b, err := json.Marshal(&JSONMessage{Stream: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + "\r") +} + +func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + if sf.json { + b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + streamNewline) +} + +func (sf *StreamFormatter) FormatError(err error) []byte { + if sf.json { + jsonError, ok := err.(*JSONError) + if !ok { + jsonError = &JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return append(b, streamNewlineBytes...) 
+ } + return []byte("{\"error\":\"format error\"}" + streamNewline) + } + return []byte("Error: " + err.Error() + streamNewline) +} + +func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte { + if progress == nil { + progress = &JSONProgress{} + } + if sf.json { + + b, err := json.Marshal(&JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + }) + if err != nil { + return nil + } + return b + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +func (sf *StreamFormatter) Json() bool { + return sf.json +} + +type StdoutFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +type StderrFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff --git a/utils/streamformatter_test.go b/utils/streamformatter_test.go new file mode 100644 index 00000000..20610f6c --- /dev/null +++ b/utils/streamformatter_test.go @@ -0,0 +1,67 @@ +package utils + +import ( + "encoding/json" + "errors" + "reflect" + "testing" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatStatus(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONError(t *testing.T) { + sf := NewStreamFormatter(true) + err := &JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatProgress(t *testing.T) { + sf := NewStreamFormatter(true) + progress := &JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress) + msg := &JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + if msg.ProgressMessage != progress.String() { + t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) + } + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress not equals progress from FormatProgress") + } +} diff --git a/utils/timeoutconn.go b/utils/timeoutconn.go new file mode 100644 index 00000000..a3231c7e --- /dev/null +++ b/utils/timeoutconn.go @@ -0,0 +1,26 @@ +package utils + +import ( + "net" + "time" +) + +func NewTimeoutConn(conn 
net.Conn, timeout time.Duration) net.Conn {
+ return &TimeoutConn{conn, timeout}
+}
+
+// A net.Conn that sets a read deadline for every Read operation
+type TimeoutConn struct {
+ net.Conn
+ timeout time.Duration
+}
+
+func (c *TimeoutConn) Read(b []byte) (int, error) {
+ if c.timeout > 0 {
+    err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout))
+    if err != nil {
+       return 0, err
+    }
+ }
+ return c.Conn.Read(b)
+}
diff --git a/utils/timeoutconn_test.go b/utils/timeoutconn_test.go
new file mode 100644
index 00000000..d07b96cc
--- /dev/null
+++ b/utils/timeoutconn_test.go
@@ -0,0 +1,33 @@
+package utils
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+func TestTimeoutConnRead(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+    fmt.Fprintln(w, "hello")
+ }))
+ defer ts.Close()
+ conn, err := net.Dial("tcp", ts.URL[7:])
+ if err != nil {
+    t.Fatalf("failed to create connection to %q: %v", ts.URL, err)
+ }
+ tconn := NewTimeoutConn(conn, 1*time.Second)
+
+ if _, err = bufio.NewReader(tconn).ReadString('\n'); err == nil {
+    t.Fatalf("expected timeout error, got none")
+ }
+ if _, err := fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n"); err != nil {
+    t.Errorf("unexpected error: %v", err)
+ }
+ if _, err = bufio.NewReader(tconn).ReadString('\n'); err != nil {
+    t.Errorf("unexpected error: %v", err)
+ }
+}
diff --git a/utils/tmpdir.go b/utils/tmpdir.go
new file mode 100644
index 00000000..921a8f69
--- /dev/null
+++ b/utils/tmpdir.go
@@ -0,0 +1,12 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+
+package utils
+
+import (
+ "os"
+)
+
+// TempDir returns the default directory to use for temporary files.
+func TempDir(rootdir string) (string, error) {
+ return os.TempDir(), nil
+}
diff --git a/utils/tmpdir_unix.go b/utils/tmpdir_unix.go
new file mode 100644
index 00000000..30d7c3a1
--- /dev/null
+++ b/utils/tmpdir_unix.go
@@ -0,0 +1,18 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package utils
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// TempDir returns the default directory to use for temporary files.
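+// A DOCKER_TMPDIR environment variable takes precedence; otherwise a "tmp"
+// directory under rootDir is used, created with mode 0700 if missing.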
+func TempDir(rootDir string) (string, error) {
+ var tmpDir string
+ if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
+    tmpDir = filepath.Join(rootDir, "tmp")
+ }
+ err := os.MkdirAll(tmpDir, 0700)
+ return tmpDir, err
+}
diff --git a/utils/utils.go b/utils/utils.go
new file mode 100644
index 00000000..4c65f136
--- /dev/null
+++ b/utils/utils.go
@@ -0,0 +1,542 @@
+package utils
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/log"
+)
+
+type KeyValuePair struct {
+ Key string
+ Value string
+}
+
+var (
+ validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
+// Download requests a given URL and returns the *http.Response; any HTTP
+// status code >= 400 is reported as an error
+func Download(url string) (resp *http.Response, err error) {
+ if resp, err = http.Get(url); err != nil {
+    return nil, err
+ }
+ if resp.StatusCode >= 400 {
+    return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
+ }
+ return resp, nil
+}
+
+func Trunc(s string, maxlen int) string {
+ if len(s) <= maxlen {
+    return s
+ }
+ return s[:maxlen]
+}
+
+// Figure out the absolute path of our own binary (if it's still around).
+func SelfPath() string {
+ path, err := exec.LookPath(os.Args[0])
+ if err != nil {
+    if os.IsNotExist(err) {
+       return ""
+    }
+    if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) {
+       return ""
+    }
+    panic(err)
+ }
+ path, err = filepath.Abs(path)
+ if err != nil {
+    if os.IsNotExist(err) {
+       return ""
+    }
+    panic(err)
+ }
+ return path
+}
+
+func dockerInitSha1(target string) string {
+ f, err := os.Open(target)
+ if err != nil {
+    return ""
+ }
+ defer f.Close()
+ h := sha1.New()
+ _, err = io.Copy(h, f)
+ if err != nil {
+    return ""
+ }
+ return hex.EncodeToString(h.Sum(nil))
+}
+
+func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
+ if target == "" {
+    return false
+ }
+ if dockerversion.IAMSTATIC {
+    if selfPath == "" {
+       return false
+    }
+    if target == selfPath {
+       return true
+    }
+    targetFileInfo, err := os.Lstat(target)
+    if err != nil {
+       return false
+    }
+    selfPathFileInfo, err := os.Lstat(selfPath)
+    if err != nil {
+       return false
+    }
+    return os.SameFile(targetFileInfo, selfPathFileInfo)
+ }
+ return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1
+}
+
+// Figure out the path of our dockerinit (which may be SelfPath())
+func DockerInitPath(localCopy string) string {
+ selfPath := SelfPath()
+ if isValidDockerInitPath(selfPath, selfPath) {
+    // if we're valid, don't bother checking anything else
+    return selfPath
+ }
+ var possibleInits = []string{
+    localCopy,
+    dockerversion.INITPATH,
+    filepath.Join(filepath.Dir(selfPath), "dockerinit"),
+
+    // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec."
+    // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec
+    "/usr/libexec/docker/dockerinit",
+    "/usr/local/libexec/docker/dockerinit",
+
+    // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts."
+    // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA
+    "/usr/lib/docker/dockerinit",
+    "/usr/local/lib/docker/dockerinit",
+ }
+ for _, dockerInit := range possibleInits {
+    if dockerInit == "" {
+       continue
+    }
+    path, err := exec.LookPath(dockerInit)
+    if err == nil {
+       path, err = filepath.Abs(path)
+       if err != nil {
+          // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail?
+          panic(err)
+       }
+       if isValidDockerInitPath(path, selfPath) {
+          return path
+       }
+    }
+ }
+ return ""
+}
+
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+    log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+    return len(fds)
+ }
+ return -1
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+ shortLen := 12
+ if len(id) < shortLen {
+    shortLen = len(id)
+ }
+ return id[:shortLen]
+}
+
+// GenerateRandomID returns a unique id
+func GenerateRandomID() string {
+ for {
+    id := make([]byte, 32)
+    if _, err := io.ReadFull(rand.Reader, id); err != nil {
+       panic(err) // This shouldn't happen
+    }
+    value := hex.EncodeToString(id)
+    // if we try to parse the truncated form as an int and we don't have
+    // an error then the value is all numeric and causes issues when
+    // used as a hostname. ref #3869
+    if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
+       continue
+    }
+    return value
+ }
+}
+
+func ValidateID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+    err := fmt.Errorf("image ID '%s' is invalid", id)
+    return err
+ }
+ return nil
+}
+
+// Copied from io.Copy(), modified to handle escape sequences
+func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
+ buf := make([]byte, 32*1024)
+ for {
+    nr, er := src.Read(buf)
+    if nr > 0 {
+       // ---- Docker addition
+       // char 16 is C-p
+       if nr == 1 && buf[0] == 16 {
+          nr, er = src.Read(buf)
+          // char 17 is C-q
+          if nr == 1 && buf[0] == 17 {
+             if err := src.Close(); err != nil {
+                return 0, err
+             }
+             return 0, nil
+          }
+       }
+       // ---- End of docker
+       nw, ew := dst.Write(buf[0:nr])
+       if nw > 0 {
+          written += int64(nw)
+       }
+       if ew != nil {
+          err = ew
+          break
+       }
+       if nr != nw {
+          err = io.ErrShortWrite
+          break
+       }
+    }
+    if er == io.EOF {
+       break
+    }
+    if er != nil {
+       err = er
+       break
+    }
+ }
+ return written, err
+}
+
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+    return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// FIXME: this is deprecated by CopyWithTar in archive.go
+func CopyDirectory(source, dest string) error {
+ if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
+    return fmt.Errorf("Error copy: %s (%s)", err, output)
+ }
+ return nil
+}
+
+type WriteFlusher struct {
+ sync.Mutex
+ w io.Writer
+ flusher http.Flusher
+}
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ wf.Lock()
+ defer wf.Unlock()
+ n, err = wf.w.Write(b)
+ wf.flusher.Flush()
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ wf.Lock()
+ defer wf.Unlock()
+ wf.flusher.Flush()
+}
+
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var flusher http.Flusher
+ if f, ok := w.(http.Flusher); ok {
+    flusher = f
+ } else {
+    flusher = &ioutils.NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: flusher}
+}
+
+func NewHTTPRequestError(msg string, res *http.Response) error {
+ return &JSONError{
+    Message: msg,
+    Code: res.StatusCode,
+ }
+}
+
+func IsURL(str string) bool {
+ return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
+}
+
+func IsGIT(str string) bool {
+ return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str))
+}
+
+var (
+ localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`)
+)
+
+// RemoveLocalDns looks into the /etc/resolv.conf,
+// and removes any local nameserver entries.
+func RemoveLocalDns(resolvConf []byte) []byte {
+ return localHostRx.ReplaceAll(resolvConf, []byte{})
+}
+
+// A StatusError reports an unsuccessful exit by a command.
+type StatusError struct {
+ Status string
+ StatusCode int
+}
+
+func (e *StatusError) Error() string {
+ return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
+}
+
+func quote(word string, buf *bytes.Buffer) {
+ // Bail out early for "simple" strings
+ if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+    buf.WriteString(word)
+    return
+ }
+
+ buf.WriteString("'")
+
+ for i := 0; i < len(word); i++ {
+    b := word[i]
+    if b == '\'' {
+       // Replace literal ' with a close ', a \', and an open '
+       buf.WriteString("'\\''")
+    } else {
+       buf.WriteByte(b)
+    }
+ }
+
+ buf.WriteString("'")
+}
+
+// Take a list of strings and escape them so they will be handled right
+// when passed as arguments to a program via a shell
+func ShellQuoteArguments(args []string) string {
+ var buf bytes.Buffer
+ for i, arg := range args {
+    if i != 0 {
+       buf.WriteByte(' ')
+    }
+    quote(arg, &buf)
+ }
+ return buf.String()
+}
+
+var globalTestID string
+
+// TestDirectory creates a new temporary directory and returns its path.
+// The contents of the directory at path `templateDir` are copied into the
+// new directory.
+func TestDirectory(templateDir string) (dir string, err error) {
+ if globalTestID == "" {
+    globalTestID = RandomString()[:4]
+ }
+ prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
+ if prefix == "" {
+    prefix = "docker-test-"
+ }
+ dir, err = ioutil.TempDir("", prefix)
+ if err = os.Remove(dir); err != nil {
+    return
+ }
+ if templateDir != "" {
+    if err = CopyDirectory(templateDir, dir); err != nil {
+       return
+    }
+ }
+ return
+}
+
+// GetCallerName introspects the call stack and returns the name of the
+// function `depth` levels down in the stack.
+func GetCallerName(depth int) string {
+ // Use the caller function name as a prefix.
+ // This helps trace temp directories back to their test.
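+ // runtime.Caller(depth + 1) skips GetCallerName's own stack frame, so
+ // depth is counted relative to our caller.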
+ pc, _, _, _ := runtime.Caller(depth + 1)
+ callerLongName := runtime.FuncForPC(pc).Name()
+ parts := strings.Split(callerLongName, ".")
+ callerShortName := parts[len(parts)-1]
+ return callerShortName
+}
+
+func CopyFile(src, dst string) (int64, error) {
+ if src == dst {
+    return 0, nil
+ }
+ sf, err := os.Open(src)
+ if err != nil {
+    return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(dst); err != nil && !os.IsNotExist(err) {
+    return 0, err
+ }
+ df, err := os.Create(dst)
+ if err != nil {
+    return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// ReplaceOrAppendEnvValues returns the defaults with the overrides either
+// replaced by env key or appended to the list
+func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
+ cache := make(map[string]int, len(defaults))
+ for i, e := range defaults {
+    parts := strings.SplitN(e, "=", 2)
+    cache[parts[0]] = i
+ }
+ for _, value := range overrides {
+    parts := strings.SplitN(value, "=", 2)
+    if i, exists := cache[parts[0]]; exists {
+       defaults[i] = value
+    } else {
+       defaults = append(defaults, value)
+    }
+ }
+ return defaults
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link must not be a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+    return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+    return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+    return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+    return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// TreeSize walks a directory tree and returns its total size in bytes.
+func TreeSize(dir string) (size int64, err error) {
+ data := make(map[uint64]struct{})
+ err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error {
+    // Ignore directory sizes
+    if fileInfo == nil {
+       return nil
+    }
+
+    s := fileInfo.Size()
+    if fileInfo.IsDir() || s == 0 {
+       return nil
+    }
+
+    // Check inode to handle hard links correctly
+    inode := fileInfo.Sys().(*syscall.Stat_t).Ino
+    // inode is not a uint64 on all platforms. Cast it to avoid issues.
+    if _, exists := data[uint64(inode)]; exists {
+       return nil
+    }
+    // inode is not a uint64 on all platforms. Cast it to avoid issues.
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// It fails if the target of the symbolic link is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+	var realPath string
+	var err error
+	if realPath, err = filepath.Abs(path); err != nil {
+		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+	}
+	realPathInfo, err := os.Stat(realPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+	}
+	if !realPathInfo.Mode().IsDir() {
+		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+	}
+	return realPath, nil
+}
+
+// TreeSize walks a directory tree and returns its total size in bytes.
+func TreeSize(dir string) (size int64, err error) {
+	data := make(map[uint64]struct{})
+	err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error {
+		// Ignore directory sizes
+		if fileInfo == nil {
+			return nil
+		}
+
+		s := fileInfo.Size()
+		if fileInfo.IsDir() || s == 0 {
+			return nil
+		}
+
+		// Check the inode so hard-linked files are only counted once.
+		// The inode is not a uint64 on all platforms; cast it to avoid issues.
+		inode := fileInfo.Sys().(*syscall.Stat_t).Ino
+		if _, exists := data[uint64(inode)]; exists {
+			return nil
+		}
+		data[uint64(inode)] = struct{}{}
+
+		size += s
+
+		return nil
+	})
+	return
+}
+
+// ValidateContextDirectory checks that all the contents of the directory
+// can be read, and returns an error if any file can't be read.
+// Symlinks which point to non-existent files don't trigger an error.
+func ValidateContextDirectory(srcPath string, excludes []string) error {
+	return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+		// Check the walk error first: when err != nil, f may be nil and
+		// must not be dereferenced.
+		if err != nil {
+			if os.IsPermission(err) {
+				return fmt.Errorf("can't stat '%s'", filePath)
+			}
+			if os.IsNotExist(err) {
+				return nil
+			}
+			return err
+		}
+
+		// Skip this directory/file if it's not in the path; it won't get added to the context.
+		if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
+			return err
+		} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+			return err
+		} else if skip {
+			if f.IsDir() {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+
+		// Skip checking whether symlinks point to non-existent files; such symlinks can be useful.
+		// Also skip named pipes, because they would hang on open.
+		if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+			return nil
+		}
+
+		if !f.IsDir() {
+			currentFile, err := os.Open(filePath)
+			if err != nil && os.IsPermission(err) {
+				return fmt.Errorf("no permission to read from '%s'", filePath)
+			}
+			currentFile.Close()
+		}
+		return nil
+	})
+}
+
+func StringsContainsNoCase(slice []string, s string) bool {
+	for _, ss := range slice {
+		if strings.ToLower(s) == strings.ToLower(ss) {
+			return true
+		}
+	}
+	return false
+}
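+
+// Illustrative example (not part of the original source): a caller would
+// typically validate a build context against .dockerignore-style excludes
+// before tarring it up. The paths here are hypothetical.
+//
+//	func validateContextExample() error {
+//		excludes := []string{".git", "node_modules"}
+//		return ValidateContextDirectory("/path/to/context", excludes)
+//	}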
diff --git a/utils/utils_test.go b/utils/utils_test.go
new file mode 100644
index 00000000..ce304482
--- /dev/null
+++ b/utils/utils_test.go
@@ -0,0 +1,99 @@
+package utils
+
+import (
+	"os"
+	"testing"
+)
+
+func TestReplaceAndAppendEnvVars(t *testing.T) {
+	var (
+		d = []string{"HOME=/"}
+		o = []string{"HOME=/root", "TERM=xterm"}
+	)
+
+	env := ReplaceOrAppendEnvValues(d, o)
+	if len(env) != 2 {
+		t.Fatalf("expected len of 2 got %d", len(env))
+	}
+	if env[0] != "HOME=/root" {
+		t.Fatalf("expected HOME=/root got '%s'", env[0])
+	}
+	if env[1] != "TERM=xterm" {
+		t.Fatalf("expected TERM=xterm got '%s'", env[1])
+	}
+}
+
+// Reading a symlink to a directory must return the directory
+func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
+	var err error
+	if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
+		t.Errorf("failed to create directory: %s", err)
+	}
+
+	if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
+		t.Errorf("failed to create symlink: %s", err)
+	}
+
+	var path string
+	if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
+		t.Fatalf("failed to read symlink to directory: %s", err)
+	}
+
+	if path != "/tmp/testReadSymlinkToExistingDirectory" {
+		t.Fatalf("symlink returned unexpected directory: %s", path)
+	}
+
+	if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
+		t.Errorf("failed to remove temporary directory: %s", err)
+	}
+
+	if err = os.Remove("/tmp/dirLinkTest"); err != nil {
+		t.Errorf("failed to remove symlink: %s", err)
+	}
+}
+
+// Reading a non-existing symlink must fail
+func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
+	var path string
+	var err error
+	if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
+		t.Fatalf("error expected for non-existing symlink")
+	}
+
+	if path != "" {
+		t.Fatalf("expected empty path, but '%s' was returned", path)
+	}
+}
+
+// Reading a symlink to a file must fail
+func TestReadSymlinkedDirectoryToFile(t *testing.T) {
+	var err error
+	var file *os.File
+
+	if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
+		t.Fatalf("failed to create file: %s", err)
+	}
+
+	file.Close()
+
+	if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
+		t.Errorf("failed to create symlink: %s", err)
+	}
+
+	var path string
+	if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
+		t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
+	}
+
+	if path != "" {
+		t.Fatalf("path should've been empty: %s", path)
+	}
+
+	if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
+		t.Errorf("failed to remove file: %s", err)
+	}
+
+	if err = os.Remove("/tmp/fileLinkTest"); err != nil {
+		t.Errorf("failed to remove symlink: %s", err)
+	}
+}
diff --git a/volumes/MAINTAINERS b/volumes/MAINTAINERS
new file mode 100644
index 00000000..469ffe0c
--- /dev/null
+++ b/volumes/MAINTAINERS
@@ -0,0 +1 @@
+Brian Goff (@cpuguy83)
diff --git a/volumes/repository.go b/volumes/repository.go
new file mode 100644
index 00000000..2383f34a
--- /dev/null
+++ b/volumes/repository.go
@@ -0,0 +1,217 @@
+package volumes
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
+)
+
+type Repository struct {
+	configPath string
+	driver     graphdriver.Driver
+	volumes    map[string]*Volume
+	lock       sync.Mutex
+}
+
+func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
+	abspath, err := filepath.Abs(configPath)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the config path
+	if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	repo := &Repository{
+		driver:     driver,
+		configPath: abspath,
+		volumes:    make(map[string]*Volume),
+	}
+
+	return repo, repo.restore()
+}
+
+func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
+	var (
+		isBindMount bool
+		err         error
+		id          = utils.GenerateRandomID()
+	)
+	if path != "" {
+		isBindMount = true
+	}
+
+	if path == "" {
+		path, err = r.createNewVolumePath(id)
+		if err != nil {
+			return nil, err
+		}
+	}
+	path = filepath.Clean(path)
+
+	path, err = filepath.EvalSymlinks(path)
+	if err != nil {
+		return nil, err
+	}
+
+	v := &Volume{
+		ID:          id,
+		Path:        path,
+		repository:  r,
+		Writable:    writable,
+		containers:  make(map[string]struct{}),
+		configPath:  r.configPath + "/" + id,
+		IsBindMount: isBindMount,
+	}
+
+	if err := v.initialize(); err != nil {
+		return nil, err
+	}
+
+	return v, r.add(v)
+}
+
+func (r *Repository) restore() error {
+	dir, err := ioutil.ReadDir(r.configPath)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range dir {
+		id := v.Name()
+		path, err := r.driver.Get(id, "")
+		if err != nil {
+			log.Debugf("Could not find volume for %s: %v", id, err)
+			continue
+		}
+		vol := &Volume{
+			ID:         id,
+			configPath: r.configPath + "/" + id,
+			containers: make(map[string]struct{}),
+			Path:       path,
+		}
+		if err := vol.FromDisk(); err != nil {
+			if !os.IsNotExist(err) {
+				log.Debugf("Error restoring volume: %v", err)
+				continue
+			}
+			if err := vol.initialize(); err != nil {
+				log.Debugf("%s", err)
+				continue
+			}
+		}
+		if err := r.add(vol); err != nil {
+			log.Debugf("Error restoring volume: %v", err)
+		}
+	}
+	return nil
+}
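+
+// Illustrative note (inferred from the code above, not verbatim from the
+// original tree): each volume persists its metadata as JSON at
+// <configPath>/<id>/config.json, which restore() reads back on daemon
+// startup to rebuild the in-memory volume map.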
+
+func (r *Repository) Get(path string) *Volume {
+	r.lock.Lock()
+	vol := r.get(path)
+	r.lock.Unlock()
+	return vol
+}
+
+func (r *Repository) get(path string) *Volume {
+	path, err := filepath.EvalSymlinks(path)
+	if err != nil {
+		return nil
+	}
+	return r.volumes[filepath.Clean(path)]
+}
+
+func (r *Repository) Add(volume *Volume) error {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	return r.add(volume)
+}
+
+func (r *Repository) add(volume *Volume) error {
+	if vol := r.get(volume.Path); vol != nil {
+		return fmt.Errorf("Volume exists: %s", volume.ID)
+	}
+	r.volumes[volume.Path] = volume
+	return nil
+}
+
+func (r *Repository) Remove(volume *Volume) {
+	r.lock.Lock()
+	r.remove(volume)
+	r.lock.Unlock()
+}
+
+func (r *Repository) remove(volume *Volume) {
+	delete(r.volumes, volume.Path)
+}
+
+func (r *Repository) Delete(path string) error {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	path, err := filepath.EvalSymlinks(path)
+	if err != nil {
+		return err
+	}
+	volume := r.get(filepath.Clean(path))
+	if volume == nil {
+		return fmt.Errorf("Volume %s does not exist", path)
+	}
+
+	if volume.IsBindMount {
+		return fmt.Errorf("Volume %s is a bind-mount and cannot be removed", volume.Path)
+	}
+	containers := volume.Containers()
+	if len(containers) > 0 {
+		return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
+	}
+
+	if err := os.RemoveAll(volume.configPath); err != nil {
+		return err
+	}
+
+	if err := r.driver.Remove(volume.ID); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	r.remove(volume)
+	return nil
+}
+
+func (r *Repository) createNewVolumePath(id string) (string, error) {
+	if err := r.driver.Create(id, ""); err != nil {
+		return "", err
+	}
+
+	path, err := r.driver.Get(id, "")
+	if err != nil {
+		return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
+	}
+
+	return path, nil
+}
+
+func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	if path == "" {
+		return r.newVolume(path, writable)
+	}
+
+	if v := r.get(path); v != nil {
+		return v, nil
+	}
+
+	return r.newVolume(path, writable)
+}
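+
+// Illustrative usage sketch (assumed wiring; the config path and error
+// handling are hypothetical): the daemon builds one Repository and asks it
+// for volumes as containers are created.
+//
+//	func repositoryExample(driver graphdriver.Driver) error {
+//		repo, err := NewRepository("/var/lib/docker/volumes", driver)
+//		if err != nil {
+//			return err
+//		}
+//		v, err := repo.FindOrCreateVolume("", true) // "" => new driver-backed volume
+//		if err != nil {
+//			return err
+//		}
+//		return v.ToDisk()
+//	}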
diff --git a/volumes/volume.go b/volumes/volume.go
new file mode 100644
index 00000000..e2d7a726
--- /dev/null
+++ b/volumes/volume.go
@@ -0,0 +1,139 @@
+package volumes
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/pkg/symlink"
+)
+
+type Volume struct {
+	ID          string
+	Path        string
+	IsBindMount bool
+	Writable    bool
+	containers  map[string]struct{}
+	configPath  string
+	repository  *Repository
+	lock        sync.Mutex
+}
+
+func (v *Volume) IsDir() (bool, error) {
+	stat, err := os.Stat(v.Path)
+	if err != nil {
+		return false, err
+	}
+
+	return stat.IsDir(), nil
+}
+
+func (v *Volume) Containers() []string {
+	v.lock.Lock()
+
+	var containers []string
+	for c := range v.containers {
+		containers = append(containers, c)
+	}
+
+	v.lock.Unlock()
+	return containers
+}
+
+func (v *Volume) RemoveContainer(containerId string) {
+	v.lock.Lock()
+	delete(v.containers, containerId)
+	v.lock.Unlock()
+}
+
+func (v *Volume) AddContainer(containerId string) {
+	v.lock.Lock()
+	v.containers[containerId] = struct{}{}
+	v.lock.Unlock()
+}
+
+func (v *Volume) createIfNotExist() error {
+	// os.Stat returns a nil FileInfo on error, so it must not be used in the
+	// not-exist branch; only the error itself is inspected here.
+	if _, err := os.Stat(v.Path); err == nil || !os.IsNotExist(err) {
+		return nil
+	}
+
+	// The path does not exist yet: create the parent directories, then the
+	// path itself as an empty file that can serve as a mount target.
+	if err := os.MkdirAll(filepath.Dir(v.Path), 0755); err != nil {
+		return err
+	}
+	f, err := os.OpenFile(v.Path, os.O_CREATE, 0755)
+	if err != nil {
+		return err
+	}
+	f.Close()
+	return nil
+}
+
+func (v *Volume) initialize() error {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if err := v.createIfNotExist(); err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(v.configPath, 0755); err != nil {
+		return err
+	}
+	jsonPath, err := v.jsonPath()
+	if err != nil {
+		return err
+	}
+	f, err := os.Create(jsonPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return v.toDisk()
+}
+
+func (v *Volume) ToDisk() error {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+	return v.toDisk()
+}
+
+func (v *Volume) toDisk() error {
+	data, err := json.Marshal(v)
+	if err != nil {
+		return err
+	}
+
+	pth, err := v.jsonPath()
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(pth, data, 0666)
+}
+
+func (v *Volume) FromDisk() error {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+	pth, err := v.jsonPath()
+	if err != nil {
+		return err
+	}
+
+	data, err := ioutil.ReadFile(pth)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(data, v)
+}
+
+func (v *Volume) jsonPath() (string, error) {
+	return v.getRootResourcePath("config.json")
+}
+
+// getRootResourcePath resolves a path relative to the volume's config
+// directory, without allowing it to escape that directory via symlinks.
+func (v *Volume) getRootResourcePath(path string) (string, error) {
+	cleanPath := filepath.Join("/", path)
+	return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
+}
-- 
2.30.2