--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# Development image for building the Debian "docker.io" source package.
++++# NOTE(review): base image is untagged (":latest" implied) -- tolerable for a
++++# throwaway dev/build image, but not reproducible; confirm this is intended.
++++FROM tianon/debian-devel
++++
++++# start by adding just "debian/control" so we can get mk-build-deps with maximum caching
++++COPY control /usr/src/docker.io/debian/
++++WORKDIR /usr/src/docker.io
++++
++++# get all the build deps of _this_ package in a nice repeatable way
++++# (mk-build-deps builds a metapackage from debian/control's Build-Depends;
++++# -i installs it, -r removes the generated .deb, -t sets the apt-get command
++++# used to install it)
++++RUN apt-get update && mk-build-deps -irt'apt-get --no-install-recommends -yV' debian/control
++++
++++# need our debian/ directory to compile _this_ package
++++# (the build context is the debian/ directory itself, copied in full here)
++++COPY . /usr/src/docker.io/debian
++++
++++# go download and unpack our upstream source
++++# (uscan reads debian/watch; download-libcontainer fetches the vendored-dep
++++# tarballs into ".." as instructed by DOCKER_TARBALLS)
++++RUN uscan --force-download --verbose --download-current-version
++++RUN DOCKER_TARBALLS=.. ./debian/helpers/download-libcontainer
++++RUN /tianon/extract-origtargz.sh
++++
++++# tianon is _really_ lazy, and likes a preseeded bash history
++++RUN echo '/tianon/extract-origtargz.sh && dpkg-buildpackage -us -uc && lintian -EvIL+pedantic' >> /root/.bash_history
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Docker on Debian
++++================
++++
++++To enable docker memory limitation, the kernel needs to be loaded with
++++boot parameters: cgroup_enable=memory swapaccount=1.
++++
++++This is because enabling memory cgroup support has some run-time overhead,
++++and kernel maintainers don't want to slow down systems unnecessarily.
++++
++++http://www.mail-archive.com/debian-bugs-dist@lists.debian.org/msg764104.html
++++https://github.com/docker/docker/issues/396
++++
++++To instruct the kernel to enable memory cgroup support, edit
++++/etc/default/grub and extend GRUB_CMDLINE_LINUX_DEFAULT like:
++++
++++GRUB_CMDLINE_LINUX_DEFAULT="cgroup_enable=memory swapaccount=1"
++++
++++Then run update-grub, and reboot.
++++
++++
++++As noted in the upstream documentation (https://docs.docker.io), Docker will
++++allow non-root users in the "docker" group to access "docker.sock" and thus
++++communicate with the daemon. To add yourself to the "docker" group, use
++++something like:
++++
++++adduser YOURUSER docker
++++
++++As also noted in the upstream documentation, the "docker" group (and any other
++++means of accessing the Docker API) is root-equivalent. If you don't trust a
++++user with root on your box, you shouldn't trust them with Docker either. If you
++++are interested in further information about the security aspects of Docker,
++++please be sure to read the "Docker Security"
++++(http://docs.docker.io/en/latest/articles/security/) article in the
++++upstream documentation.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++docker.io (1.3.3~dfsg1-2) unstable; urgency=medium
++++
++++ * Add fatal-error-old-kernels.patch to make Docker refuse to start on old,
++++ unsupported kernels (Closes: #774376)
++++ * Fix dh_auto_clean to clean up after the build properly, especially to avoid
++++ FTBFS when built twice (Closes: #774482)
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Sat, 03 Jan 2015 00:11:47 -0700
++++
++++docker.io (1.3.3~dfsg1-1) unstable; urgency=medium
++++
++++ [ Tianon Gravi ]
++++ * Update to 1.3.3 upstream release (Closes: #772909)
++++ - Fix for CVE-2014-9356 (Path traversal during processing of absolute
++++ symlinks)
++++ - Fix for CVE-2014-9357 (Escalation of privileges during decompression of
++++ LZMA (.xz) archives)
++++ - Fix for CVE-2014-9358 (Path traversal and spoofing opportunities presented
++++ through image identifiers)
++++ * Fix bashism in nuke-graph-directory.sh (Closes: #772261)
++++
++++ [ Didier Roche ]
++++ * Support starting systemd service without /etc/default/docker
++++ (Closes: #770293)
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Thu, 18 Dec 2014 21:54:12 -0700
++++
++++docker.io (1.3.2~dfsg1-1) unstable; urgency=high
++++
++++ * Severity is set to high due to the sensitive nature of the CVEs this
++++ upload fixes.
++++ * Update to 1.3.2 upstream release
++++ - Fix for CVE-2014-6407 (Archive extraction host privilege escalation)
++++ - Fix for CVE-2014-6408 (Security options applied to image could lead
++++ to container escalation)
++++ * Remove Daniel Mizyrycki from Uploaders. Thanks for all your work!
++++
++++ -- Paul Tagliamonte <paultag@debian.org> Mon, 24 Nov 2014 19:14:28 -0500
++++
++++docker.io (1.3.1~dfsg1-2) unstable; urgency=medium
++++
++++ * Remove deprecated /usr/bin/docker.io symlink
++++ - added as a temporary shim in 1.0.0~dfsg1-1 (13 Jun 2014)
++++ - unused by package-installed files in 1.2.0~dfsg1-1 (13 Sep 2014)
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Fri, 07 Nov 2014 13:11:34 -0700
++++
++++docker.io (1.3.1~dfsg1-1) unstable; urgency=high
++++
++++ * Update to 1.3.1 upstream release
++++ - fix for CVE-2014-5277
++++ - https://groups.google.com/d/topic/docker-user/oYm0i3xShJU/discussion
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Mon, 03 Nov 2014 08:26:29 -0700
++++
++++docker.io (1.3.0~dfsg1-1) unstable; urgency=medium
++++
++++ * Updated to 1.3.0 upstream release.
++++ * Enable systemd socket activation (Closes: #752555).
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Fri, 17 Oct 2014 00:56:07 -0600
++++
++++docker.io (1.2.0~dfsg1-2) unstable; urgency=medium
++++
++++ * Added "golang-docker-dev" package for the reusable bits of Docker's source.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Thu, 09 Oct 2014 00:08:11 +0000
++++
++++docker.io (1.2.0~dfsg1-1) unstable; urgency=medium
++++
++++ * Updated to 1.2.0 upstream release (Closes: #757183, #757023, #757024).
++++ * Added upstream man pages.
++++ * Updated bash and zsh completions to be installed as "docker" and "_docker".
++++ * Updated init scripts to also be installed as "docker".
++++ * Fixed "equivalent" typo in README.Debian (Closes: #756395). Thanks Reuben!
++++ * Removed "docker.io" mention in README.Debian (Closes: #756290). Thanks
++++ Olivier!
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Sat, 13 Sep 2014 11:43:17 -0600
++++
++++docker.io (1.0.0~dfsg1-1) unstable; urgency=medium
++++
++++ * Updated to 1.0.0 upstream release. Huzzah!
++++ * I've removed what is commonly called a `button' of patches against
++++ the docker package. Exact patches:
++++ - bash-completion-docker.io.patch
++++ - systemd-docker.io.patch
++++ - sysvinit-provides-docker.io.patch
++++ - zsh-completion-docker.io.patch
++++ - mkimage-docker.io.patch
++++ * I know y'all are guessing why; and the answer's pretty simple -- we're
++++ no longer docker.io(1). Since the src:docker package now ships wmdocker(1),
++++ we can safely declare a breaks/replaces on the pre-wmdocker version of the
++++ package, allowing existing users to safely update, both src:docker and
++++ src:docker.io side. This brings us into line with other distros, which
++++ now ship wmdocker(1) and docker(1).
++++ * As a stop-gap, I'm still shipping a docker.io(1) symlink to allow
++++ migration away.
++++
++++ -- Paul Tagliamonte <paultag@debian.org> Fri, 13 Jun 2014 21:04:53 -0400
++++
++++docker.io (0.11.1~dfsg1-1) unstable; urgency=medium
++++
++++ [ Paul Tagliamonte ]
++++ * Use EnvironmentFile with the systemd unit file. (Closes: #746774)
++++ * Patch out version checking code. (Closes: #747140)
++++ * Remove all host checking for non-amd64 host arches. Let docker build
++++ and run on all platforms now. (Closes: #747139, #739914)
++++
++++ [ Tianon Gravi ]
++++ * Updated to 0.11.1 upstream release.
++++ * Added backported upstream patch for removing RemoteAddr assumptions
++++ that cause events to not be delivered to more than one unix socket
++++ listener.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Fri, 09 May 2014 17:30:45 -0400
++++
++++docker.io (0.9.1~dfsg1-2) unstable; urgency=medium
++++
++++ * Added upstream apparmor patch to fix newer apparmor versions (such as the
++++ version appearing in Ubuntu 14.04).
++++ * Added mkimage-* docker.io binary name patches (Closes: #740855).
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Tue, 08 Apr 2014 23:19:08 -0400
++++
++++docker.io (0.9.1~dfsg1-1) unstable; urgency=medium
++++
++++ * Updated to 0.9.1 upstream release (Closes: #743424).
++++ * Added cgroupfs-mount dependency (Closes: #742641).
++++ * Added Suggests entries for optional features, chiefly lxc (Closes: #742081).
++++ * Added notes about "root-equivalence" to README.Debian (Closes: #742387).
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Thu, 03 Apr 2014 21:38:30 -0400
++++
++++docker.io (0.9.0+dfsg1-1) unstable; urgency=medium
++++
++++ * Updated README.Debian to not be quite so outdated (Closes: #740850).
++++ * Updated to 0.9.0 upstream release.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Tue, 11 Mar 2014 22:24:31 -0400
++++
++++docker.io (0.8.1+dfsg1-1) unstable; urgency=medium
++++
++++ * Updated to 0.8.1 upstream release.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Tue, 25 Feb 2014 20:56:31 -0500
++++
++++docker.io (0.8.0+dfsg1-2) unstable; urgency=medium
++++
++++ [ Tianon Gravi ]
++++ * Added more license notes to debian/copyright (Closes: #738627).
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Sat, 15 Feb 2014 17:51:58 -0500
++++
++++docker.io (0.8.0+dfsg1-1) unstable; urgency=medium
++++
++++ [ Prach Pongpanich ]
++++ * Added zsh completion.
++++
++++ [ Tianon Gravi ]
++++ * Updated to 0.8.0 upstream release.
++++ * Added vim syntax files in new vim-syntax-docker package.
++++ * Added note about minimum recommended kernel version to Description.
++++ * Added contrib/*-integration files in /usr/share/docker.io/contrib.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Mon, 10 Feb 2014 20:41:10 -0500
++++
++++docker.io (0.7.6+dfsg1-1) unstable; urgency=medium
++++
++++ [ Johan Euphrosine ]
++++ * Updated to 0.7.6.
++++ * Added dependency to gocapability.
++++ * Clean patches.
++++
++++ [ Tianon Gravi ]
++++ * Added contrib/mk* scripts from upstream into /usr/share/docker.io/contrib
++++ (Closes: #736068).
++++ * Added upstream udev rules file to stop device-mapper devices and mounts from
++++ appearing in desktop environments through udisks.
++++
++++ -- Johan Euphrosine <proppy@google.com> Wed, 22 Jan 2014 22:50:47 -0500
++++
++++docker.io (0.7.1+dfsg1-1) unstable; urgency=medium
++++
++++ [ Prach Pongpanich ]
++++ * Fixed "docker: command not found" errors while using bash tab completion
++++ (Closes: #735372).
++++
++++ [ Tianon Gravi ]
++++ * Updated to 0.7.1 upstream release (while we wait for gocapability to be
++++ packaged).
++++ * Added xz-utils recommend which is required for decompressing certain images
++++ from the index.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Wed, 15 Jan 2014 20:22:34 -0500
++++
++++docker.io (0.6.7+dfsg1-3) unstable; urgency=medium
++++
++++ * Fixed FTBFS on non-amd64 platforms by setting the correct GOPATH.
++++ * Fixed issues with Docker finding a valid dockerinit (Closes: #734758).
++++ * Added aufs-tools dependency.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Thu, 09 Jan 2014 20:10:20 -0500
++++
++++docker.io (0.6.7+dfsg1-2) unstable; urgency=medium
++++
++++ * Added iptables dependency required for Docker to start.
++++ * Added ca-certificates recommend required for pulling from the index.
++++
++++ -- Tianon Gravi <admwiggin@gmail.com> Wed, 08 Jan 2014 19:14:02 -0500
++++
++++docker.io (0.6.7+dfsg1-1) unstable; urgency=medium
++++
++++ * Initial release (Closes: #706060, #730569)
++++ * Document missing licenses in the source tree. Bad, paultag. Thanks
++++ alteholz.
++++
++++ -- Paul Tagliamonte <paultag@debian.org> Tue, 07 Jan 2014 21:06:10 -0500
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++9
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Source: docker.io
++++Section: admin
++++Priority: optional
++++Maintainer: Paul Tagliamonte <paultag@debian.org>
++++Uploaders: Docker Packaging Team <docker-maint@lists.alioth.debian.org>,
++++ Tianon Gravi <admwiggin@gmail.com>,
++++ Johan Euphrosine <proppy@google.com>
++++Build-Depends: bash-completion,
++++ btrfs-tools,
++++ debhelper (>=9),
++++ dh-golang (>= 1.1),
++++ dh-systemd,
++++ go-md2man,
++++ golang (>= 2:1.2-3~),
++++ golang (>= 2:1.2.1-2~) | golang (<< 2:1.2.1~),
++++ golang (>= 2:1.3-4~) | golang (= 2:1.3-1) | golang (<< 2:1.3~),
++++ golang-context-dev (>= 0.0~git20140604~),
++++ golang-dbus-dev (>= 1~),
++++ golang-go-patricia-dev (>= 1.0.1~),
++++ golang-go-systemd-dev (>= 2~),
++++ golang-go.net-dev (>= 0.0~hg20130530~),
++++ golang-gocapability-dev (>= 0.0~git20140516~),
++++ golang-gosqlite-dev (>= 0.0~hg20130530~),
++++ golang-mux-dev (>= 0.0~git20140505~),
++++ golang-pty-dev (>= 0.0~git20140315.1.67e2db2-1~),
++++ libapparmor-dev,
++++ libdevmapper-dev
++++Standards-Version: 3.9.6
++++Homepage: https://github.com/docker/docker
++++Vcs-Git: git://anonscm.debian.org/docker/docker.io.git
++++Vcs-Browser: http://anonscm.debian.org/cgit/docker/docker.io.git
++++
++++Package: docker.io
++++Architecture: linux-any
++++Depends: adduser, iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
++++Recommends: aufs-tools,
++++ ca-certificates,
++++ cgroupfs-mount | cgroup-lite,
++++ git,
++++ xz-utils,
++++ ${apparmor:Recommends}
++++Replaces: docker (<< 1.5~)
++++Breaks: docker (<< 1.5~)
++++Suggests: btrfs-tools, debootstrap, lxc, rinse
++++Built-Using: ${misc:Built-Using}
++++Description: Linux container runtime
++++ Docker complements kernel namespacing with a high-level API which operates at
++++ the process level. It runs unix processes with strong guarantees of isolation
++++ and repeatability across servers.
++++ .
++++ Docker is a great building block for automating distributed systems:
++++ large-scale web deployments, database clusters, continuous deployment systems,
++++ private PaaS, service-oriented architectures, etc.
++++ .
++++ This package contains the daemon and client. Using docker.io on non-amd64 hosts
++++ is not supported at this time. Please be careful when using it on anything
++++ besides amd64.
++++ .
++++ Also, note that kernel version 3.8 or above is required for proper operation of
++++ the daemon process, and that any lower versions may have subtle and/or glaring
++++ issues.
++++
++++Package: vim-syntax-docker
++++Architecture: all
++++Depends: vim, ${misc:Depends}
++++Recommends: vim-addon-manager
++++Suggests: docker.io
++++Description: Docker container engine - Vim highlighting syntax files
++++ This package provides syntax files for the Vim editor for editing Dockerfiles
++++ from the Docker container engine.
++++
++++Package: golang-docker-dev
++++Architecture: all
++++Depends: ${misc:Depends}
++++Built-Using: ${misc:Built-Using}
++++Description: Externally reusable Go packages included with Docker
++++ These packages are intentionally developed by upstream in such a way that they
++++ are reusable to projects outside Docker and only rely on each other or other
++++ external dependencies to be built.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
++++Upstream-Name: Docker
++++Upstream-Contact: Docker, Inc. <support@docker.com>
++++Source: https://github.com/docker/docker
++++
++++Files: *
++++Copyright: 2012-2014 Docker, Inc. <support@docker.com>
++++License: Apache-2.0
++++
++++Files: debian/*
++++Copyright: 2013-2014 Daniel Mizyrycki <daniel@docker.com>
++++ 2013-2014 Paul Tagliamonte <paultag@debian.org>
++++ 2012-2014 Michael Stapelberg <stapelberg@debian.org>
++++ 2013-2014 Tianon Gravi <admwiggin@gmail.com>
++++ 2013-2014 Johan Euphrosine <proppy@google.com>
++++ 2014 Prach Pongpanich <prachpub@gmail.com>
++++License: Apache-2.0
++++
++++Files: contrib/init/openrc/docker.initd
++++Copyright: 1999-2013 Gentoo Foundation
++++License: GPL-2
++++
++++Files: contrib/syntax/vim/*
++++Copyright: 2013 Honza Pokorny
++++License: BSD-2-clause
++++
++++Files: pkg/mflag/*
++++Copyright: 2014 The Docker & Go Authors
++++License: BSD-3-clause-Google
++++
++++Files: contrib/completion/zsh/*
++++Copyright: 2013-2014 Felix Riedel
++++License: BSD-3-clause-Generic
++++
++++License: Apache-2.0
++++ Licensed under the Apache License, Version 2.0 (the "License");
++++ you may not use this file except in compliance with the License.
++++ You may obtain a copy of the License at
++++ .
++++ http://www.apache.org/licenses/LICENSE-2.0
++++ .
++++ Unless required by applicable law or agreed to in writing, software
++++ distributed under the License is distributed on an "AS IS" BASIS,
++++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++++ See the License for the specific language governing permissions and
++++ limitations under the License.
++++ .
++++ On Debian systems, the complete text of the Apache version 2.0 license
++++ can be found in "/usr/share/common-licenses/Apache-2.0".
++++
++++License: GPL-2
++++ This file is part of Buildbot. Buildbot is free software: you can
++++ redistribute it and/or modify it under the terms of the GNU General Public
++++ License as published by the Free Software Foundation, version 2.
++++ .
++++ This program is distributed in the hope that it will be useful, but WITHOUT
++++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
++++ details.
++++ .
++++ You should have received a copy of the GNU General Public License along with
++++ this program; if not, write to the Free Software Foundation, Inc., 51
++++ Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++++ .
++++ On Debian systems, the complete text of the GNU General Public
++++ License version 2 can be found in "/usr/share/common-licenses/GPL-2".
++++
++++License: BSD-2-clause
++++ Redistribution and use in source and binary forms, with or without
++++ modification, are permitted provided that the following conditions are met:
++++ .
++++ 1. Redistributions of source code must retain the above copyright
++++ notice, this list of conditions and the following disclaimer.
++++ 2. Redistributions in binary form must reproduce the above copyright
++++ notice, this list of conditions and the following disclaimer in the
++++ documentation and/or other materials provided with the distribution.
++++ .
++++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
++++ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++++ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
++++ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++++ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++++ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++++ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++++
++++License: BSD-3-clause-Google
++++ Redistribution and use in source and binary forms, with or without
++++ modification, are permitted provided that the following conditions are
++++ met:
++++ .
++++ * Redistributions of source code must retain the above copyright
++++ notice, this list of conditions and the following disclaimer.
++++ * Redistributions in binary form must reproduce the above
++++ copyright notice, this list of conditions and the following disclaimer
++++ in the documentation and/or other materials provided with the
++++ distribution.
++++ * Neither the name of Google Inc. nor the names of its
++++ contributors may be used to endorse or promote products derived from
++++ this software without specific prior written permission.
++++ .
++++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++++ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++++
++++License: BSD-3-clause-Generic
++++ Redistribution and use in source and binary forms, with or without
++++ modification, are permitted provided that the following conditions are met:
++++ * Redistributions of source code must retain the above copyright
++++ notice, this list of conditions and the following disclaimer.
++++ * Redistributions in binary form must reproduce the above copyright
++++ notice, this list of conditions and the following disclaimer in the
++++ documentation and/or other materials provided with the distribution.
++++ * Neither the name of the <organization> nor the
++++ names of its contributors may be used to endorse or promote products
++++ derived from this software without specific prior written permission.
++++ .
++++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
++++ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++++ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++++ DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++++ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++++ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++++ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++contrib/completion/bash/docker
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++../contrib/init/sysvinit-debian/docker.default
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++../contrib/init/sysvinit-debian/docker
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++../contrib/init/upstart/docker.conf
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++contrib/*-integration usr/share/docker.io/contrib/
++++contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
++++contrib/init/systemd/docker.service lib/systemd/system/
++++contrib/init/systemd/docker.socket lib/systemd/system/
++++contrib/mk* usr/share/docker.io/contrib/
++++contrib/nuke-graph-directory.sh usr/share/docker.io/contrib/
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++docker.io binary: statically-linked-binary usr/lib/docker.io/dockerinit
++++# Yes, I assure you this is normal. Damnit, go.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++mv_conffile /etc/bash_completion.d/docker.io /etc/bash_completion.d/docker 1.2.0~
++++mv_conffile /etc/default/docker.io /etc/default/docker 1.2.0~
++++mv_conffile /etc/init.d/docker.io /etc/init.d/docker 1.2.0~
++++mv_conffile /etc/init/docker.io.conf /etc/init/docker.conf 1.2.0~
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++docs/man/man*/*
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++#!/bin/sh
++++# docker.io postinst maintainer script: dpkg calls this with an action in $1
++++# (see Debian Policy 6.5 for the calling conventions).
++++set -e
++++
++++case "$1" in
++++ configure)
++++ # $2 is the most-recently-configured version; empty means a fresh
++++ # install, so create the system "docker" group exactly once.
++++ # Membership of this group is root-equivalent (see README.Debian).
++++ if [ -z "$2" ]; then
++++ addgroup --system docker
++++ fi
++++ ;;
++++ abort-*)
++++ # How'd we get here??
++++ exit 1
++++ ;;
++++ *)
++++ ;;
++++esac
++++
++++#DEBHELPER#
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++../contrib/udev/80-docker.rules
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++README.md
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++[DEFAULT]
++++cleaner = fakeroot debian/rules clean
++++pristine-tar = False
++++
++++[git-buildpackage]
++++export-dir = ../build-area/
++++tarball-dir = ../tarballs/
++++
++++[git-dch]
++++id-length = 7
++++meta = True
++++auto = True
++++full = True
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++#!/bin/bash
++++# Fetch the libcontainer and libtrust tarballs matching the commits pinned in
++++# Docker's hack/vendor.sh, then hand each one to ../repack.sh for DFSG
++++# repacking. Tarballs land in $DOCKER_TARBALLS (default: ../tarballs).
++++# Must be run from the package source tree (uses dpkg-parsechangelog).
++++set -e
++++
++++# ":=" assigns the default into DOCKER_TARBALLS so later uses see it too
++++mkdir -p "${DOCKER_TARBALLS:=../tarballs}"
++++
++++# derive the upstream version pieces from debian/changelog
++++pkg="$(dpkg-parsechangelog -SSource)"
++++ver="$(dpkg-parsechangelog -SVersion)"
++++origVer="${ver%-*}" # strip everything from the last dash
++++origVer="$(echo "$origVer" | sed -r 's/^[0-9]+://')" # strip epoch
++++upstreamVer="${origVer%%[+~]*}"
++++origTarballPrefix="${DOCKER_TARBALLS}/${pkg}_${origVer}.orig"
++++unprunedTarballPrefix="${DOCKER_TARBALLS}/${pkg}_${upstreamVer}.orig"
++++
++++# pick an HTTP fetcher; either way "$curl URL" writes the body to stdout
++++if command -v curl &> /dev/null; then
++++ curl='curl -sSL'
++++elif command -v wget &> /dev/null; then
++++ curl='wget -qO-'
++++else
++++ echo >&2 'error: missing "curl" or "wget" - install one or the other'
++++ exit 1
++++fi
++++
++++# print upstream's hack/vendor.sh on stdout, preferring the local orig
++++# tarball and falling back to a raw GitHub fetch for this upstream version
++++get_hack_vendor() {
++++ if [ -e "${origTarballPrefix}.tar.gz" ]; then
++++ # if we have the main orig tarball handy, let's prefer that
++++ tar -xzOf "${origTarballPrefix}.tar.gz" --wildcards '*/hack/vendor.sh'
++++ else
++++ # but fall back to grabbing it raw from github otherwise
++++ $curl "https://raw.githubusercontent.com/docker/docker/v${upstreamVer}/hack/vendor.sh"
++++ fi
++++}
++++
++++# "clone git github.com/docker/libcontainer <commit>" lines pin the vendored
++++# commit; field 4 is the commit id. Skip quietly if no such line exists.
++++if libcontainerCommit="$(get_hack_vendor | grep -m1 '^clone git github.com/docker/libcontainer ' | cut -d' ' -f4)" && [ "$libcontainerCommit" ]; then
++++ $curl "https://github.com/docker/libcontainer/archive/${libcontainerCommit}.tar.gz" > "${unprunedTarballPrefix}-libcontainer.tar.gz"
++++
++++ echo "successfully fetched ${unprunedTarballPrefix}-libcontainer.tar.gz"
++++ echo " (from libcontainer commit $libcontainerCommit)"
++++
++++ # repack.sh lives one directory above this helper script
++++ "$(dirname "$(readlink -f "$BASH_SOURCE")")/../repack.sh" --upstream-version "$upstreamVer" "${unprunedTarballPrefix}-libcontainer.tar.gz"
++++fi
++++
++++# same dance for libtrust (NOTE(review): this re-runs get_hack_vendor, which
++++# may hit the network a second time when no local orig tarball exists)
++++if libtrustCommit="$(get_hack_vendor | grep -m1 '^clone git github.com/docker/libtrust ' | cut -d' ' -f4)" && [ "$libtrustCommit" ]; then
++++ $curl "https://github.com/docker/libtrust/archive/${libtrustCommit}.tar.gz" > "${unprunedTarballPrefix}-libtrust.tar.gz"
++++
++++ echo "successfully fetched ${unprunedTarballPrefix}-libtrust.tar.gz"
++++ echo " (from libtrust commit $libtrustCommit)"
++++
++++ "$(dirname "$(readlink -f "$BASH_SOURCE")")/../repack.sh" --upstream-version "$upstreamVer" "${unprunedTarballPrefix}-libtrust.tar.gz"
++++fi
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++#!/bin/bash
++++# Determine the upstream git commit (DOCKER_GITCOMMIT) for a given upstream
++++# version, trying several strategies in order. Prints the commit on stdout;
++++# prints a warning (but still exits 0) if nothing works.
++++#   $1 - upstream version (default: contents of ./VERSION)
++++#   $2 - Debian version (default: from debian/changelog)
++++set -e
++++
++++uVersion="$1"
++++dVersion="$2"
++++
++++if [ -z "$uVersion" ]; then
++++ uVersion="$(cat VERSION)"
++++fi
++++if [ -z "$dVersion" ]; then
++++ dVersion="$(dpkg-parsechangelog --show-field Version)"
++++fi
++++
++++# strategy 1: a release version (no "-dev" suffix) is looked up in the
++++# static commit table shipped with the packaging
++++if [ "${uVersion%-dev}" = "$uVersion" ]; then
++++ # this is a straight-up release! easy-peasy
++++ exec awk '/^'"$uVersion"':/ { print $2 }' debian/upstream-version-gitcommits
++++fi
++++
++++# must be a nightly, so let's look for clues about what the git commit is
++++
++++# strategy 2: if we're inside a git checkout, just ask git
++++if git rev-parse &> /dev/null; then
++++ # well, this will be easy ;)
++++ exec git rev-parse --short HEAD
++++fi
++++
++++# strategy 3: parse the commit out of the Debian version string
++++if [ "${dVersion#*+*+}" != "$dVersion" ]; then
++++ # must be something like "1.1.2+10013+8c38a3d-1~utopic1" (nightly!)
++++ commit="${dVersion#*+*+}"
++++ commit="${commit%%-*}"
++++ exec echo "$commit"
++++fi
++++
++++# unknown...
++++echo >&2 'warning: unable to determine DOCKER_GITCOMMIT'
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++From: Tianon Gravi <admwiggin@gmail.com>
++++Subject: Fix a bashism and some minor bugs in nuke-graph-directory.sh
++++Applied-Upstream: https://github.com/docker/docker/pull/9637 (estimated to be in Docker 1.5.0)
++++
++++diff --git a/contrib/nuke-graph-directory.sh b/contrib/nuke-graph-directory.sh
++++index f44c45a..8d12a9d 100755
++++--- a/contrib/nuke-graph-directory.sh
+++++++ b/contrib/nuke-graph-directory.sh
++++@@ -50,9 +50,10 @@ for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
++++ done
++++
++++ # now, let's go destroy individual btrfs subvolumes, if any exist
++++-if command -v btrfs &> /dev/null; then
+++++if command -v btrfs > /dev/null 2>&1; then
++++ root="$(df "$dir" | awk 'NR>1 { print $NF }')"
++++- for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do
+++++ root="${root#/}" # if root is "/", we want it to become ""
+++++ for subvol in $(btrfs subvolume list -o "$root/" 2>/dev/null | awk -F' path ' '{ print $2 }' | sort -r); do
++++ subvolDir="$root/$subvol"
++++ if dir_in_dir "$subvolDir" "$dir"; then
++++ ( set -x; btrfs subvolume delete "$subvolDir" )
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Author: Paul R. Tagliamonte <paultag@debian.org>
++++Last-Update: 2014-05-07
++++Description: Use EnvironmentFile with the systemd unit file.
++++Bug-Debian: http://bugs.debian.org/746774
++++Forwarded: no
++++
++++diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service
++++index 0cb31e3..240961c 100644
++++--- a/contrib/init/systemd/docker.service
+++++++ b/contrib/init/systemd/docker.service
++++@@ -5,7 +5,8 @@ After=network.target docker.socket
++++ Requires=docker.socket
++++
++++ [Service]
++++-ExecStart=/usr/bin/docker -d -H fd://
+++++EnvironmentFile=-/etc/default/docker
+++++ExecStart=/usr/bin/docker -d -H fd:// $DOCKER_OPTS
++++ LimitNOFILE=1048576
++++ LimitNPROC=1048576
++++
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Author: Paul R. Tagliamonte <paultag@debian.org>
++++Last-Update: 2014-05-07
++++Description: Let this build on all platforms.
++++Applied-Upstream: haha-lololol
++++Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=747139
++++Forwarded: not-needed
++++
++++diff --git a/daemon/daemon.go b/daemon/daemon.go
++++index 0d27549..0bd9ff6 100644
++++--- a/daemon/daemon.go
+++++++ b/daemon/daemon.go
++++@@ -1073,7 +1073,7 @@ func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*i
++++ func checkKernelAndArch() error {
++++ // Check for unsupported architectures
++++ if runtime.GOARCH != "amd64" {
++++- return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
+++++ fmt.Fprintf(os.Stderr, "WARNING: The Docker runtime currently only officially supports amd64 (not %s). THIS BUILD IS NOT OFFICIAL AND WILL NOT BE SUPPORTED BY DOCKER UPSTREAM.", runtime.GOARCH)
++++ }
++++ // Check for unsupported kernel versions
++++ // FIXME: it would be cleaner to not test for specific versions, but rather
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Description: Docker is unsupported on kernels < 3.8, so this turns the warning into a fatal error (hard failure)
++++Forwarded: not-needed
++++Author: Tianon Gravi <admwiggin@gmail.com>
++++Reviewed-by: Paul Tagliamonte <paultag@debian.org>
++++
++++diff --git a/daemon/daemon.go b/daemon/daemon.go
++++index 632b9ab..5df7a54 100644
++++--- a/daemon/daemon.go
+++++++ b/daemon/daemon.go
++++@@ -1129,7 +1129,7 @@ func checkKernelAndArch() error {
++++ } else {
++++ if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
++++ if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
++++- log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+++++ log.Fatalf("ERROR: You are running Linux kernel version %s, which is unsupported for running Docker. Please upgrade your kernel to 3.8+.", k.String())
++++ }
++++ }
++++ }
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# Once upstream kills the archive/tar vendoring, remove this patch
++++upstream-patched-archive-tar.patch
++++
++++# Remove this in Docker 1.5.0
++++9637-fix-nuke-bashism.patch
++++
++++# If upstream ever resolves https://github.com/docker/docker/issues/8969 in a
++++# reasonable way, remove this patch.
++++fatal-error-old-kernels.patch
++++
++++# Once upstream adds EnvFile, remove this patch.
++++change-system-unit-env-file.patch
++++# See also https://github.com/docker/docker/pull/7220#issuecomment-50076589
++++
++++# Upstream deltas:
++++# -> Let there be light on non-amd64
++++enable-non-amd64-arches.patch
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Author: Tianon Gravi <admwiggin@gmail.com>
++++Description: "archive/tar" patch for upstreamed xattrs patch
++++Applied-Upstream: when golang-1.4 is broadly packaged (scheduled to be released 2014-12-01)
++++
++++diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
++++index da51254..de232cb 100644
++++--- a/graph/tags_unit_test.go
+++++++ b/graph/tags_unit_test.go
++++@@ -7,11 +7,11 @@ import (
++++ "path"
++++ "testing"
++++
+++++ "archive/tar"
++++ "github.com/docker/docker/daemon/graphdriver"
++++ _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
++++ "github.com/docker/docker/image"
++++ "github.com/docker/docker/utils"
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
++++ )
++++
++++ const (
++++diff --git a/integration-cli/utils.go b/integration-cli/utils.go
++++index f3f128e..abea8a3 100644
++++--- a/integration-cli/utils.go
+++++++ b/integration-cli/utils.go
++++@@ -16,7 +16,7 @@ import (
++++ "testing"
++++ "time"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++ )
++++
++++ func getExitCode(err error) (int, error) {
++++diff --git a/integration/api_test.go b/integration/api_test.go
++++index 8fa295e..9c1ecb2 100644
++++--- a/integration/api_test.go
+++++++ b/integration/api_test.go
++++@@ -14,11 +14,11 @@ import (
++++ "testing"
++++ "time"
++++
+++++ "archive/tar"
++++ "github.com/docker/docker/api"
++++ "github.com/docker/docker/api/server"
++++ "github.com/docker/docker/engine"
++++ "github.com/docker/docker/runconfig"
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
++++ )
++++
++++ func TestGetContainersJSON(t *testing.T) {
++++diff --git a/integration/utils_test.go b/integration/utils_test.go
++++index e1abfa7..c5bc3af 100644
++++--- a/integration/utils_test.go
+++++++ b/integration/utils_test.go
++++@@ -13,7 +13,7 @@ import (
++++ "testing"
++++ "time"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++
++++ "github.com/docker/docker/builtins"
++++ "github.com/docker/docker/daemon"
++++diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
++++index 155145f..0c41f1b 100644
++++--- a/pkg/archive/archive.go
+++++++ b/pkg/archive/archive.go
++++@@ -16,7 +16,7 @@ import (
++++ "strings"
++++ "syscall"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++
++++ "github.com/docker/docker/pkg/fileutils"
++++ "github.com/docker/docker/pkg/log"
++++diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go
++++index 7c9db44..39c8caf 100644
++++--- a/pkg/archive/archive_test.go
+++++++ b/pkg/archive/archive_test.go
++++@@ -12,7 +12,7 @@ import (
++++ "testing"
++++ "time"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++ )
++++
++++ func TestCmdStreamLargeStderr(t *testing.T) {
++++diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go
++++index 5fbdcc9..fa96bb8 100644
++++--- a/pkg/archive/changes.go
+++++++ b/pkg/archive/changes.go
++++@@ -10,7 +10,7 @@ import (
++++ "syscall"
++++ "time"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++
++++ "github.com/docker/docker/pkg/log"
++++ "github.com/docker/docker/pkg/pools"
++++diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go
++++index 5ed1a1d..f20fcb8 100644
++++--- a/pkg/archive/diff.go
+++++++ b/pkg/archive/diff.go
++++@@ -9,7 +9,7 @@ import (
++++ "strings"
++++ "syscall"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++
++++ "github.com/docker/docker/pkg/pools"
++++ )
++++diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go
++++index 758c411..1af10fe 100644
++++--- a/pkg/archive/diff_test.go
+++++++ b/pkg/archive/diff_test.go
++++@@ -3,7 +3,7 @@ package archive
++++ import (
++++ "testing"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++ )
++++
++++ func TestApplyLayerInvalidFilenames(t *testing.T) {
++++diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go
++++index 3624fe5..8e26a11 100644
++++--- a/pkg/archive/utils_test.go
+++++++ b/pkg/archive/utils_test.go
++++@@ -9,7 +9,7 @@ import (
++++ "path/filepath"
++++ "time"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++ )
++++
++++ var testUntarFns = map[string]func(string, io.Reader) error{
++++diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go
++++index b8b6019..dfb335c 100644
++++--- a/pkg/archive/wrap.go
+++++++ b/pkg/archive/wrap.go
++++@@ -1,8 +1,8 @@
++++ package archive
++++
++++ import (
+++++ "archive/tar"
++++ "bytes"
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
++++ "io/ioutil"
++++ )
++++
++++diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go
++++index 6581f3f..ed36494 100644
++++--- a/pkg/tarsum/tarsum.go
+++++++ b/pkg/tarsum/tarsum.go
++++@@ -11,7 +11,7 @@ import (
++++ "strconv"
++++ "strings"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++
++++ "github.com/docker/docker/pkg/log"
++++ )
++++diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go
++++index 1e06cda..ef910c3 100644
++++--- a/pkg/tarsum/tarsum_test.go
+++++++ b/pkg/tarsum/tarsum_test.go
++++@@ -15,7 +15,7 @@ import (
++++ "os"
++++ "testing"
++++
++++- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+++++ "archive/tar"
++++ )
++++
++++ type testLayer struct {
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++#!/bin/bash
++++# Taken from the X Strike Force Build System
++++
++++set -e
++++
++++if ! [ -d debian/repack/prune ]; then
++++ exit 0
++++fi
++++
++++if [ "x$1" != x--upstream-version ]; then
++++ exit 1
++++fi
++++
++++version="$2"
++++filename="$3"
++++
++++if [ -z "$version" ] || ! [ -f "$filename" ]; then
++++ exit 1
++++fi
++++
++++dir="$(pwd)"
++++tempdir="$(mktemp -d)"
++++
++++cd "$tempdir"
++++tar xf "$dir/$filename"
++++cat "$dir"/debian/repack/prune/* | while read file; do
++++ if [ -e */"$file" ]; then
++++ echo "Pruning $file"
++++ rm -rf */"$file"
++++ fi
++++done
++++
++++dfsgfilename="$filename"
++++if [[ "$dfsgfilename" != *dfsg* ]]; then
++++ dfsgfilename="${dfsgfilename/.orig/~dfsg1.orig}"
++++fi
++++tar -czf ${dir}/${dfsgfilename} *
++++cd "$dir"
++++rm -rf "$tempdir"
++++echo "Done pruning upstream tarball into $dfsgfilename"
++++
++++exit 0
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++./docs/sources
++++./docs/theme
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++./vendor
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++#!/usr/bin/make -f
++++# -*- makefile -*-
++++#
++++# debian/rules for docker.io: builds the docker and dockerinit binaries via
++++# upstream's hack/make.sh, and splits the reusable Go sources out into the
++++# separate golang-docker-dev package.
++++
++++# Tell dh-golang where this package lives upstream
++++export DH_GOPKG := github.com/docker/docker
++++# Tell dh-golang that we DO need subpackages
++++export DH_GOLANG_INSTALL_ALL := 1
++++
++++# Go import paths of the separately-tarballed (multitarball) dependencies.
++++LIBCONTAINER_GOPKG = github.com/docker/libcontainer
++++LIBTRUST_GOPKG = github.com/docker/libtrust
++++
++++# temporary build path (see http://golang.org/doc/code.html#GOPATH)
++++export GOPATH := $(CURDIR)/obj-$(DEB_BUILD_GNU_TYPE)
++++
++++# a few helpful variables for deduplication
++++INITDIR = /usr/lib/docker.io
++++INITPATH = ${INITDIR}/dockerinit
++++DOCKER_VERSION = $(shell cat VERSION)
++++
++++# Exact upstream git commit for this version (looked up in
++++# debian/upstream-version-gitcommits); upstream's build embeds it.
++++export DOCKER_GITCOMMIT := $(shell ./debian/helpers/gitcommit.sh $(DOCKER_VERSION))
++++export DOCKER_INITPATH := ${INITPATH}
++++
++++# good old Ubuntu needs AppArmor
++++export DOCKER_BUILDTAGS := apparmor
++++
++++
++++# "apparmor" only when the build vendor is Ubuntu; empty string otherwise.
++++APPARMOR_RECOMMENDS = $(shell dpkg-vendor --is Ubuntu && echo apparmor)
++++override_dh_gencontrol:
++++	echo 'apparmor:Recommends=$(APPARMOR_RECOMMENDS)' >> debian/docker.io.substvars
++++	dh_gencontrol
++++
++++
++++# Fail fast (exit 2) when the version->gitcommit table has no entry, then
++++# symlink the multitarball deps into the build GOPATH before compiling.
++++override_dh_auto_build:
++++	@bash -c '{ [ "$$DOCKER_GITCOMMIT" ]; } || { echo; echo "error: missing DOCKER_GITCOMMIT - see debian/upstream-version-gitcommits"; echo; exit 2; } >&2'
++++
++++	@# this is especially for easier build-testing of nightlies
++++	@[ -d libcontainer ] || { [ -d vendor/src/$(LIBCONTAINER_GOPKG) ] && ln -sf vendor/src/$(LIBCONTAINER_GOPKG) libcontainer; }
++++	@[ -d libtrust ] || { [ -d vendor/src/$(LIBTRUST_GOPKG) ] && ln -sf vendor/src/$(LIBTRUST_GOPKG) libtrust; }
++++
++++	@# we need to make sure our multitarball deps are in our GOPATH
++++	@mkdir -p "$$GOPATH/src/$(dir $(LIBCONTAINER_GOPKG))" "$$GOPATH/src/$(dir $(LIBTRUST_GOPKG))"
++++	ln -sf "$$(readlink -f libcontainer)" "$(GOPATH)/src/$(dir $(LIBCONTAINER_GOPKG))"
++++	ln -sf "$$(readlink -f libtrust)" "$(GOPATH)/src/$(dir $(LIBTRUST_GOPKG))"
++++
++++	./hack/make.sh dynbinary
++++
++++	# compile man pages
++++	./docs/man/md2man-all.sh
++++
++++
++++# Place the built binaries, then move only the reusable Go sources (pkg/ and
++++# libcontainer) into golang-docker-dev; the rest of debian/tmp's gocode is
++++# discarded.
++++override_dh_auto_install:
++++	dh_auto_install
++++
++++	# install docker binary
++++	mkdir -p debian/docker.io/usr/bin
++++	mv bundles/${DOCKER_VERSION}/dynbinary/docker-${DOCKER_VERSION} debian/docker.io/usr/bin/docker
++++
++++	# install dockerinit binary
++++	mkdir -p debian/docker.io/${INITDIR}
++++	mv bundles/${DOCKER_VERSION}/dynbinary/dockerinit-${DOCKER_VERSION} debian/docker.io/${INITPATH}
++++
++++	# Most of the source of docker does not make a library,
++++	# so only ship the reusable parts (and in a separate package).
++++	mkdir -p debian/golang-docker-dev/usr/share/gocode/src/${DH_GOPKG}
++++	mv -v \
++++		debian/tmp/usr/share/gocode/src/${DH_GOPKG}/pkg \
++++		debian/golang-docker-dev/usr/share/gocode/src/${DH_GOPKG}/
++++	mkdir -p debian/golang-docker-dev/usr/share/gocode/src/$(dir $(LIBCONTAINER_GOPKG))
++++	@# this is especially for easier build-testing of nightlies
++++	@[ -d debian/tmp/usr/share/gocode/src/${DH_GOPKG}/libcontainer ] || { [ -d debian/tmp/usr/share/gocode/src/${DH_GOPKG}/vendor/src/$(LIBCONTAINER_GOPKG) ] && mv debian/tmp/usr/share/gocode/src/${DH_GOPKG}/vendor/src/$(LIBCONTAINER_GOPKG) debian/tmp/usr/share/gocode/src/${DH_GOPKG}/libcontainer; }
++++	mv -v \
++++		debian/tmp/usr/share/gocode/src/${DH_GOPKG}/libcontainer \
++++		debian/golang-docker-dev/usr/share/gocode/src/$(dir $(LIBCONTAINER_GOPKG))
++++	rm -rf debian/tmp/usr/share/gocode
++++
++++
++++# the SHA1 of dockerinit is important: don't strip it
++++# also, Go has lots of problems with stripping, so just don't
++++override_dh_strip:
++++
++++
++++# Tests are skipped during the package build.
++++# NOTE(review): presumably they need a running daemon/network that a buildd
++++# cannot provide — confirm before ever enabling.
++++override_dh_auto_test:
++++
++++
++++# the init script is named "docker", not "docker.io"
++++override_dh_installinit:
++++	dh_installinit --name=docker
++++
++++# ship docker.service disabled; only the socket is enabled (socket activation)
++++override_dh_systemd_enable:
++++	dh_systemd_enable -pdocker.io --no-enable docker.service
++++	dh_systemd_enable -pdocker.io docker.socket
++++
++++override_dh_systemd_start:
++++	dh_systemd_start -pdocker.io --no-start docker.service
++++	dh_systemd_start -pdocker.io docker.socket
++++
++++
++++override_dh_installchangelogs:
++++	dh_installchangelogs CHANGELOG.md
++++
++++
++++override_dh_installudev:
++++	# use priority z80 to match the upstream priority of 80
++++	dh_installudev --priority=z80
++++
++++
++++# remove build artifacts not covered by dh_auto_clean
++++override_dh_auto_clean:
++++	# bundles is created by hack/make.sh
++++	# docs/man/man*/ is created by docs/man/md2man-all.sh
++++	rm -rf bundles docs/man/man*/
++++
++++
++++# everything else: stock debhelper with the golang buildsystem
++++%:
++++	dh $@ --buildsystem=golang --with=golang,systemd,bash-completion
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++docker.io source: source-contains-unsafe-symlink utils/testdata/fs/g
++++docker.io source: source-contains-unsafe-symlink utils/testdata/fs/a/d
++++# Hilariously, these are used to test unsafe symlinks.
++++# much testing
++++# wow
++++# very unsafe
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++3.0 (quilt)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# To determine the proper value for this, download
++++# https://get.docker.io/builds/Linux/x86_64/docker-VERSION, chmod +x, and then
++++# run ./docker-VERSION -v, which will list the exact build hash needed.
++++
++++0.5.3: 17c92b8
++++0.6.0: f4a4f1c
++++0.6.1: 5105263
++++0.6.2: 081543c
++++0.6.3: b0a49a3
++++0.6.4: 2f74b1c
++++0.6.5: 3ff8459
++++0.6.6: 6d42040
++++0.6.7: cb48ecc
++++0.7.0: 0d078b6
++++0.7.1: 88df052
++++0.7.2: 28b162e
++++0.7.3: 8502ad4
++++0.7.4: 010d74e
++++0.7.5: c348c04
++++0.7.6: bc3b2ec
++++0.8.0: cc3a8c8
++++0.8.1: a1598d1
++++0.9.0: 2b3fdf2
++++0.9.1: 3600720
++++0.10.0: dc9c28f
++++0.11.0: 15209c3
++++0.11.1: fb99f99
++++0.12.0: 14680bf
++++1.0.0: 63fe64c
++++1.0.1: 990021a
++++1.1.0: 79812e3
++++1.1.1: bd609d2
++++1.1.2: d84a070
++++1.2.0: fa7b24f
++++1.3.0: c78088f
++++1.3.1: 4e9bbfa
++++1.3.2: 39fa2fa
++++1.3.3: d344625
++++1.4.0: 4595d4f
++++1.4.1: 5bc2ff8
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++contrib/syntax/vim/doc/* /usr/share/vim/addons/doc/
++++contrib/syntax/vim/ftdetect/* /usr/share/vim/addons/ftdetect/
++++contrib/syntax/vim/syntax/* /usr/share/vim/addons/syntax/
++++debian/vim-syntax-docker.yaml /usr/share/vim/registry/
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++addon: dockerfile
++++description: "Addon to highlight Docker's Dockerfiles"
++++files:
++++ - doc/dockerfile.txt
++++ - ftdetect/dockerfile.vim
++++ - syntax/dockerfile.vim
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++version=3
++++opts=\
++++dversionmangle=s/[+~](debian|dfsg|ds|deb)\d*$//,\
++++uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha)\d*)$/$1~$2/,\
++++filenamemangle=s/.+\/v(\d\S*)\.tar\.gz/docker.io_$1.orig.tar.gz/ \
++++ https://github.com/docker/docker/tags .*/v(\d\S*)\.tar\.gz debian ./debian/repack.sh
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++language: go
++++go: 1.3
++++
++++# let us have pretty experimental Docker-based Travis workers
++++sudo: false
++++
++++env:
++++ - TRAVIS_GLOBAL_WTF=1
++++ - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1
++++ - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0
++++# - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061)
++++ - _GOOS=linux _GOARCH=386 CGO_ENABLED=0
++++ - _GOOS=linux _GOARCH=arm CGO_ENABLED=0
++++
++++install:
++++ - go get code.google.com/p/go.tools/cmd/cover
++++ - mkdir -pv "${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer"
++++ - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then
++++ gvm cross "$_GOOS" "$_GOARCH";
++++ export GOOS="$_GOOS" GOARCH="$_GOARCH";
++++ fi
++++ - export GOPATH="$GOPATH:$(pwd)/vendor"
++++ - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi
++++ - go get -d -v ./... # TODO remove this if /docker/docker gets purged from our includes
++++ - if [ "$TRAVIS_GLOBAL_WTF" ]; then
++++ export DOCKER_PATH="${GOPATH%%:*}/src/github.com/docker/docker";
++++ mkdir -p "$DOCKER_PATH/hack/make";
++++ ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/docker/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} );
++++ sed -i 's!docker/docker!docker/libcontainer!' "$DOCKER_PATH/hack/make/.validate";
++++ fi
++++
++++script:
++++ - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi
++++ - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi
++++ - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then make direct-build; fi
++++ - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then make direct-test-short; fi
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# The libcontainer Contributors' Guide
++++
++++Want to hack on libcontainer? Awesome! Here are instructions to get you
++++started. They are probably not perfect, please let us know if anything
++++feels wrong or incomplete.
++++
++++## Reporting Issues
++++
++++When reporting [issues](https://github.com/docker/libcontainer/issues)
++++on GitHub, please include your host OS (Ubuntu 12.04, Fedora 19, etc)
++++and the output of `uname -a`. Please include the steps required to reproduce
++++the problem if possible and applicable.
++++This information will help us review and fix your issue faster.
++++
++++## Development Environment
++++
++++*Add instructions on setting up the development environment.*
++++
++++## Contribution Guidelines
++++
++++### Pull requests are always welcome
++++
++++We are always thrilled to receive pull requests, and do our best to
++++process them as fast as possible. Not sure if that typo is worth a pull
++++request? Do it! We will appreciate it.
++++
++++If your pull request is not accepted on the first try, don't be
++++discouraged! If there's a problem with the implementation, hopefully you
++++received feedback on what to improve.
++++
++++We're trying very hard to keep libcontainer lean and focused. We don't want it
++++to do everything for everybody. This means that we might decide against
++++incorporating a new feature. However, there might be a way to implement
++++that feature *on top of* libcontainer.
++++
++++### Discuss your design on the mailing list
++++
++++We recommend discussing your plans [on the mailing
++++list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer)
++++before starting to code - especially for more ambitious contributions.
++++This gives other contributors a chance to point you in the right
++++direction, give feedback on your design, and maybe point out if someone
++++else is working on the same thing.
++++
++++### Create issues...
++++
++++Any significant improvement should be documented as [a GitHub
++++issue](https://github.com/docker/libcontainer/issues) before anybody
++++starts working on it.
++++
++++### ...but check for existing issues first!
++++
++++Please take a moment to check that an issue doesn't already exist
++++documenting your bug report or improvement proposal. If it does, it
++++never hurts to add a quick "+1" or "I have this problem too". This will
++++help prioritize the most common problems and requests.
++++
++++### Conventions
++++
++++Fork the repo and make changes on your fork in a feature branch:
++++
++++- If it's a bugfix branch, name it XXX-something where XXX is the number of the
++++ issue
++++- If it's a feature branch, create an enhancement issue to announce your
++++ intentions, and name it XXX-something where XXX is the number of the issue.
++++
++++Submit unit tests for your changes. Go has a great test framework built in; use
++++it! Take a look at existing tests for inspiration. Run the full test suite on
++++your branch before submitting a pull request.
++++
++++Update the documentation when creating or modifying features. Test
++++your documentation changes for clarity, concision, and correctness, as
++++well as a clean documentation build. See ``docs/README.md`` for more
++++information on building the docs and how docs get released.
++++
++++Write clean code. Universally formatted code promotes ease of writing, reading,
++++and maintenance. Always run `gofmt -s -w file.go` on each changed file before
++++committing your changes. Most editors have plugins that do this automatically.
++++
++++Pull requests descriptions should be as clear as possible and include a
++++reference to all the issues that they address.
++++
++++Pull requests must not contain commits from other users or branches.
++++
++++Commit messages must start with a capitalized and short summary (max. 50
++++chars) written in the imperative, followed by an optional, more detailed
++++explanatory text which is separated from the summary by an empty line.
++++
++++Code review comments may be added to your pull request. Discuss, then make the
++++suggested modifications and push additional commits to your feature branch. Be
++++sure to post a comment after pushing. The new commits will show up in the pull
++++request automatically, but the reviewers will not be notified unless you
++++comment.
++++
++++Before the pull request is merged, make sure that you squash your commits into
++++logical units of work using `git rebase -i` and `git push -f`. After every
++++commit the test suite should be passing. Include documentation changes in the
++++same commit so that a revert would remove all traces of the feature or fix.
++++
++++Commits that fix or close an issue should include a reference like `Closes #XXX`
++++or `Fixes #XXX`, which will automatically close the issue when merged.
++++
++++### Testing
++++
++++Make sure you include suitable tests, preferably unit tests, in your pull request
++++and that all the tests pass.
++++
++++*Instructions for running tests to be added.*
++++
++++### Merge approval
++++
++++libcontainer maintainers use LGTM (looks good to me) in comments on the code review
++++to indicate acceptance.
++++
++++A change requires LGTMs from at least two maintainers. One of those must come from
++++a maintainer of the component affected. For example, if a change affects `netlink/`
++++and `security`, it needs at least one LGTM from a maintainer of each. Maintainers
++++only need one LGTM as presumably they LGTM their own change.
++++
++++For more details see [MAINTAINERS.md](MAINTAINERS.md)
++++
++++### Sign your work
++++
++++The sign-off is a simple line at the end of the explanation for the
++++patch, which certifies that you wrote it or otherwise have the right to
++++pass it on as an open-source patch. The rules are pretty simple: if you
++++can certify the below (from
++++[developercertificate.org](http://developercertificate.org/)):
++++
++++```
++++Developer Certificate of Origin
++++Version 1.1
++++
++++Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
++++660 York Street, Suite 102,
++++San Francisco, CA 94110 USA
++++
++++Everyone is permitted to copy and distribute verbatim copies of this
++++license document, but changing it is not allowed.
++++
++++
++++Developer's Certificate of Origin 1.1
++++
++++By making a contribution to this project, I certify that:
++++
++++(a) The contribution was created in whole or in part by me and I
++++ have the right to submit it under the open source license
++++ indicated in the file; or
++++
++++(b) The contribution is based upon previous work that, to the best
++++ of my knowledge, is covered under an appropriate open source
++++ license and I have the right under that license to submit that
++++ work with modifications, whether created in whole or in part
++++ by me, under the same open source license (unless I am
++++ permitted to submit under a different license), as indicated
++++ in the file; or
++++
++++(c) The contribution was provided directly to me by some other
++++ person who certified (a), (b) or (c) and I have not modified
++++ it.
++++
++++(d) I understand and agree that this project and the contribution
++++ are public and that a record of the contribution (including all
++++ personal information I submit with it, including my sign-off) is
++++ maintained indefinitely and may be redistributed consistent with
++++ this project or the open source license(s) involved.
++++```
++++
++++then you just add a line to every git commit message:
++++
++++ Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
++++
++++using your real name (sorry, no pseudonyms or anonymous contributions.)
++++
++++One way to automate this is to customise your git ``commit.template`` by
++++adding a ``prepare-commit-msg`` hook to your libcontainer checkout:
++++
++++```
++++curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/docker/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
++++```
++++
++++* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
++++
++++#### Small patch exception
++++
++++There are several exceptions to the signing requirement. Currently these are:
++++
++++* Your patch fixes spelling or grammar errors.
++++* Your patch is a single line change to documentation contained in the
++++ `docs` directory.
++++* Your patch fixes Markdown formatting or syntax errors in the
++++ documentation contained in the `docs` directory.
++++
++++If you have any questions, please refer to the FAQ in the [docs](to be written)
++++
++++### How can I become a maintainer?
++++
++++* Step 1: learn the component inside out
++++* Step 2: make yourself useful by contributing code, bugfixes, support etc.
++++* Step 3: volunteer on the irc channel (#libcontainer@freenode)
++++
++++Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
++++You don't have to be a maintainer to make a difference on the project!
++++
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# Test image for libcontainer: runs the test suite under docker-in-docker.
++++FROM crosbymichael/golang
++++
++++# gcc/make for the cgo build; cover tool for `go test -cover`
++++RUN apt-get update && apt-get install -y gcc make
++++RUN go get code.google.com/p/go.tools/cmd/cover
++++
++++# setup a playground for us to spawn containers in
++++RUN mkdir /busybox && \
++++	curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox
++++
++++# dind wrapper: lets this container host a nested Docker daemon for the tests
++++# NOTE(review): unpinned fetch from master — contents can change between builds
++++RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \
++++	chmod +x /dind
++++
++++COPY . /go/src/github.com/docker/libcontainer
++++WORKDIR /go/src/github.com/docker/libcontainer
++++# sample container config used by the tests when spawning the busybox rootfs
++++RUN cp sample_configs/minimal.json /busybox/container.json
++++
++++# make the vendored dependencies visible to the go tool
++++ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor
++++
++++RUN go get -d -v ./...
++++RUN make direct-install
++++
++++# default: run the test suite inside docker-in-docker
++++ENTRYPOINT ["/dind"]
++++CMD ["make", "direct-test"]
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++
++++ Apache License
++++ Version 2.0, January 2004
++++ http://www.apache.org/licenses/
++++
++++ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++++
++++ 1. Definitions.
++++
++++ "License" shall mean the terms and conditions for use, reproduction,
++++ and distribution as defined by Sections 1 through 9 of this document.
++++
++++ "Licensor" shall mean the copyright owner or entity authorized by
++++ the copyright owner that is granting the License.
++++
++++ "Legal Entity" shall mean the union of the acting entity and all
++++ other entities that control, are controlled by, or are under common
++++ control with that entity. For the purposes of this definition,
++++ "control" means (i) the power, direct or indirect, to cause the
++++ direction or management of such entity, whether by contract or
++++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++++ outstanding shares, or (iii) beneficial ownership of such entity.
++++
++++ "You" (or "Your") shall mean an individual or Legal Entity
++++ exercising permissions granted by this License.
++++
++++ "Source" form shall mean the preferred form for making modifications,
++++ including but not limited to software source code, documentation
++++ source, and configuration files.
++++
++++ "Object" form shall mean any form resulting from mechanical
++++ transformation or translation of a Source form, including but
++++ not limited to compiled object code, generated documentation,
++++ and conversions to other media types.
++++
++++ "Work" shall mean the work of authorship, whether in Source or
++++ Object form, made available under the License, as indicated by a
++++ copyright notice that is included in or attached to the work
++++ (an example is provided in the Appendix below).
++++
++++ "Derivative Works" shall mean any work, whether in Source or Object
++++ form, that is based on (or derived from) the Work and for which the
++++ editorial revisions, annotations, elaborations, or other modifications
++++ represent, as a whole, an original work of authorship. For the purposes
++++ of this License, Derivative Works shall not include works that remain
++++ separable from, or merely link (or bind by name) to the interfaces of,
++++ the Work and Derivative Works thereof.
++++
++++ "Contribution" shall mean any work of authorship, including
++++ the original version of the Work and any modifications or additions
++++ to that Work or Derivative Works thereof, that is intentionally
++++ submitted to Licensor for inclusion in the Work by the copyright owner
++++ or by an individual or Legal Entity authorized to submit on behalf of
++++ the copyright owner. For the purposes of this definition, "submitted"
++++ means any form of electronic, verbal, or written communication sent
++++ to the Licensor or its representatives, including but not limited to
++++ communication on electronic mailing lists, source code control systems,
++++ and issue tracking systems that are managed by, or on behalf of, the
++++ Licensor for the purpose of discussing and improving the Work, but
++++ excluding communication that is conspicuously marked or otherwise
++++ designated in writing by the copyright owner as "Not a Contribution."
++++
++++ "Contributor" shall mean Licensor and any individual or Legal Entity
++++ on behalf of whom a Contribution has been received by Licensor and
++++ subsequently incorporated within the Work.
++++
++++ 2. Grant of Copyright License. Subject to the terms and conditions of
++++ this License, each Contributor hereby grants to You a perpetual,
++++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++++ copyright license to reproduce, prepare Derivative Works of,
++++ publicly display, publicly perform, sublicense, and distribute the
++++ Work and such Derivative Works in Source or Object form.
++++
++++ 3. Grant of Patent License. Subject to the terms and conditions of
++++ this License, each Contributor hereby grants to You a perpetual,
++++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++++ (except as stated in this section) patent license to make, have made,
++++ use, offer to sell, sell, import, and otherwise transfer the Work,
++++ where such license applies only to those patent claims licensable
++++ by such Contributor that are necessarily infringed by their
++++ Contribution(s) alone or by combination of their Contribution(s)
++++ with the Work to which such Contribution(s) was submitted. If You
++++ institute patent litigation against any entity (including a
++++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++++ or a Contribution incorporated within the Work constitutes direct
++++ or contributory patent infringement, then any patent licenses
++++ granted to You under this License for that Work shall terminate
++++ as of the date such litigation is filed.
++++
++++ 4. Redistribution. You may reproduce and distribute copies of the
++++ Work or Derivative Works thereof in any medium, with or without
++++ modifications, and in Source or Object form, provided that You
++++ meet the following conditions:
++++
++++ (a) You must give any other recipients of the Work or
++++ Derivative Works a copy of this License; and
++++
++++ (b) You must cause any modified files to carry prominent notices
++++ stating that You changed the files; and
++++
++++ (c) You must retain, in the Source form of any Derivative Works
++++ that You distribute, all copyright, patent, trademark, and
++++ attribution notices from the Source form of the Work,
++++ excluding those notices that do not pertain to any part of
++++ the Derivative Works; and
++++
++++ (d) If the Work includes a "NOTICE" text file as part of its
++++ distribution, then any Derivative Works that You distribute must
++++ include a readable copy of the attribution notices contained
++++ within such NOTICE file, excluding those notices that do not
++++ pertain to any part of the Derivative Works, in at least one
++++ of the following places: within a NOTICE text file distributed
++++ as part of the Derivative Works; within the Source form or
++++ documentation, if provided along with the Derivative Works; or,
++++ within a display generated by the Derivative Works, if and
++++ wherever such third-party notices normally appear. The contents
++++ of the NOTICE file are for informational purposes only and
++++ do not modify the License. You may add Your own attribution
++++ notices within Derivative Works that You distribute, alongside
++++ or as an addendum to the NOTICE text from the Work, provided
++++ that such additional attribution notices cannot be construed
++++ as modifying the License.
++++
++++ You may add Your own copyright statement to Your modifications and
++++ may provide additional or different license terms and conditions
++++ for use, reproduction, or distribution of Your modifications, or
++++ for any such Derivative Works as a whole, provided Your use,
++++ reproduction, and distribution of the Work otherwise complies with
++++ the conditions stated in this License.
++++
++++ 5. Submission of Contributions. Unless You explicitly state otherwise,
++++ any Contribution intentionally submitted for inclusion in the Work
++++ by You to the Licensor shall be under the terms and conditions of
++++ this License, without any additional terms or conditions.
++++ Notwithstanding the above, nothing herein shall supersede or modify
++++ the terms of any separate license agreement you may have executed
++++ with Licensor regarding such Contributions.
++++
++++ 6. Trademarks. This License does not grant permission to use the trade
++++ names, trademarks, service marks, or product names of the Licensor,
++++ except as required for reasonable and customary use in describing the
++++ origin of the Work and reproducing the content of the NOTICE file.
++++
++++ 7. Disclaimer of Warranty. Unless required by applicable law or
++++ agreed to in writing, Licensor provides the Work (and each
++++ Contributor provides its Contributions) on an "AS IS" BASIS,
++++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++++ implied, including, without limitation, any warranties or conditions
++++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++++ PARTICULAR PURPOSE. You are solely responsible for determining the
++++ appropriateness of using or redistributing the Work and assume any
++++ risks associated with Your exercise of permissions under this License.
++++
++++ 8. Limitation of Liability. In no event and under no legal theory,
++++ whether in tort (including negligence), contract, or otherwise,
++++ unless required by applicable law (such as deliberate and grossly
++++ negligent acts) or agreed to in writing, shall any Contributor be
++++ liable to You for damages, including any direct, indirect, special,
++++ incidental, or consequential damages of any character arising as a
++++ result of this License or out of the use or inability to use the
++++ Work (including but not limited to damages for loss of goodwill,
++++ work stoppage, computer failure or malfunction, or any and all
++++ other commercial damages or losses), even if such Contributor
++++ has been advised of the possibility of such damages.
++++
++++ 9. Accepting Warranty or Additional Liability. While redistributing
++++ the Work or Derivative Works thereof, You may choose to offer,
++++ and charge a fee for, acceptance of support, warranty, indemnity,
++++ or other liability obligations and/or rights consistent with this
++++ License. However, in accepting such obligations, You may act only
++++ on Your own behalf and on Your sole responsibility, not on behalf
++++ of any other Contributor, and only if You agree to indemnify,
++++ defend, and hold each Contributor harmless for any liability
++++ incurred by, or claims asserted against, such Contributor by reason
++++ of your accepting any such warranty or additional liability.
++++
++++ END OF TERMS AND CONDITIONS
++++
++++ Copyright 2014 Docker, Inc.
++++
++++ Licensed under the Apache License, Version 2.0 (the "License");
++++ you may not use this file except in compliance with the License.
++++ You may obtain a copy of the License at
++++
++++ http://www.apache.org/licenses/LICENSE-2.0
++++
++++ Unless required by applicable law or agreed to in writing, software
++++ distributed under the License is distributed on an "AS IS" BASIS,
++++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++++ See the License for the specific language governing permissions and
++++ limitations under the License.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Michael Crosby <michael@docker.com> (@crosbymichael)
++++Rohit Jnagal <jnagal@google.com> (@rjnagal)
++++Victor Marmol <vmarmol@google.com> (@vmarmol)
++++Mrunal Patel <mpatel@redhat.com> (@mrunalp)
++++.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
++++update-vendor.sh: Tianon Gravi <admwiggin@gmail.com> (@tianon)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# The libcontainer Maintainers' Guide
++++
++++## Introduction
++++
++++Dear maintainer. Thank you for investing the time and energy to help
++++make libcontainer as useful as possible. Maintaining a project is difficult,
++++sometimes unrewarding work. Sure, you will get to contribute cool
++++features to the project. But most of your time will be spent reviewing,
++++cleaning up, documenting, answering questions, justifying design
++++decisions - while everyone has all the fun! But remember - the quality
of the maintainers' work is what distinguishes the good projects from the
great. So please be proud of your work, even the unglamorous parts,
++++and encourage a culture of appreciation and respect for *every* aspect
++++of improving the project - not just the hot new features.
++++
++++This document is a manual for maintainers old and new. It explains what
++++is expected of maintainers, how they should work, and what tools are
++++available to them.
++++
++++This is a living document - if you see something out of date or missing,
++++speak up!
++++
## What are a maintainer's responsibilities?
++++
++++It is every maintainer's responsibility to:
++++
++++* 1) Expose a clear roadmap for improving their component.
++++* 2) Deliver prompt feedback and decisions on pull requests.
++++* 3) Be available to anyone with questions, bug reports, criticism etc.
++++ on their component. This includes IRC, GitHub requests and the mailing
++++ list.
++++* 4) Make sure their component respects the philosophy, design and
++++ roadmap of the project.
++++
++++## How are decisions made?
++++
++++Short answer: with pull requests to the libcontainer repository.
++++
++++libcontainer is an open-source project with an open design philosophy. This
++++means that the repository is the source of truth for EVERY aspect of the
++++project, including its philosophy, design, roadmap and APIs. *If it's
part of the project, it's in the repo. If it's in the repo, it's part of
the project.*
++++
++++As a result, all decisions can be expressed as changes to the
++++repository. An implementation change is a change to the source code. An
++++API change is a change to the API specification. A philosophy change is
++++a change to the philosophy manifesto. And so on.
++++
++++All decisions affecting libcontainer, big and small, follow the same 3 steps:
++++
++++* Step 1: Open a pull request. Anyone can do this.
++++
++++* Step 2: Discuss the pull request. Anyone can do this.
++++
++++* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
++++this (see below "Who decides what?")
++++
++++
++++## Who decides what?
++++
++++All decisions are pull requests, and the relevant maintainers make
++++decisions by accepting or refusing the pull request. Review and acceptance
++++by anyone is denoted by adding a comment in the pull request: `LGTM`.
++++However, only currently listed `MAINTAINERS` are counted towards the required
++++two LGTMs.
++++
++++libcontainer follows the timeless, highly efficient and totally unfair system
++++known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL.
++++This means that all decisions are made by default by Michael. Since making
++++every decision himself would be highly un-scalable, in practice decisions
++++are spread across multiple maintainers.
++++
++++The relevant maintainers for a pull request can be worked out in two steps:
++++
++++* Step 1: Determine the subdirectories affected by the pull request. This
++++ might be `netlink/` and `security/`, or any other part of the repo.
++++
++++* Step 2: Find the `MAINTAINERS` file which affects this directory. If the
++++ directory itself does not have a `MAINTAINERS` file, work your way up
++++ the repo hierarchy until you find one.
++++
++++### I'm a maintainer, and I'm going on holiday
++++
++++Please let your co-maintainers and other contributors know by raising a pull
++++request that comments out your `MAINTAINERS` file entry using a `#`.
++++
++++### I'm a maintainer, should I make pull requests too?
++++
++++Yes. Nobody should ever push to master directly. All changes should be
++++made through a pull request.
++++
++++### Who assigns maintainers?
++++
++++Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files.
++++
++++### How is this process changed?
++++
++++Just like everything else: by making a pull request :)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++
# Build the libcontainer development image used by the other targets.
all:
	docker build -t docker/libcontainer .

# Run the test suite inside the image.
test:
	# we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting
	docker run --rm -it --privileged docker/libcontainer

# Drop into an interactive shell inside a container spawned by nsinit.
sh:
	docker run --rm -it --privileged -w /busybox docker/libcontainer nsinit exec sh

# Every Go package directory in the tree, excluding vendored code and .git.
GO_PACKAGES = $(shell find . -not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u)

# The direct-* targets run the Go toolchain on the host instead of in Docker.
direct-test:
	go test -cover -v $(GO_PACKAGES)

direct-test-short:
	go test -cover -test.short -v $(GO_PACKAGES)

direct-build:
	go build -v $(GO_PACKAGES)

direct-install:
	go install -v $(GO_PACKAGES)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++libcontainer
++++Copyright 2012-2014 Docker, Inc.
++++
++++This product includes software developed at Docker, Inc. (http://www.docker.com).
++++
++++The following is courtesy of our legal counsel:
++++
++++
++++Use and transfer of Docker may be subject to certain restrictions by the
++++United States and other governments.
++++It is your responsibility to ensure that your use and/or transfer does not
++++violate applicable laws.
++++
++++For more information, please see http://www.bis.doc.gov
++++
++++See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# libcontainer Principles
++++
++++In the design and development of libcontainer we try to follow these principles:
++++
++++(Work in progress)
++++
++++* Don't try to replace every tool. Instead, be an ingredient to improve them.
++++* Less code is better.
++++* Fewer components are better. Do you really need to add one more class?
++++* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
++++* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code.
++++* When hesitating between two options, choose the one that is easier to reverse.
++++* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later.
++++* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
++++* The fewer moving parts in a container, the better.
++++* Don't merge it unless you document it.
++++* Don't document it unless you can keep it up-to-date.
++++* Don't merge it unless you test it!
++++* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++## libcontainer - reference implementation for containers [](https://travis-ci.org/docker/libcontainer)
++++
++++### Note on API changes:
++++
++++Please bear with us while we work on making the libcontainer API stable and something that we can support long term. We are currently discussing the API with the community, therefore, if you currently depend on libcontainer please pin your dependency at a specific tag or commit id. Please join the discussion and help shape the API.
++++
++++#### Background
++++
++++libcontainer specifies configuration options for what a container is. It provides a native Go implementation for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management.
++++
++++
++++#### Container
++++A container is a self contained execution environment that shares the kernel of the host system and which is (optionally) isolated from other containers in the system.
++++
++++libcontainer may be used to execute a process in a container. If a user tries to run a new process inside an existing container, the new process is added to the processes executing in the container.
++++
++++
++++#### Root file system
++++
++++A container runs with a directory known as its *root file system*, or *rootfs*, mounted as the file system root. The rootfs is usually a full system tree.
++++
++++
++++#### Configuration
++++
++++A container is initially configured by supplying configuration data when the container is created.
++++
++++
++++#### nsinit
++++
++++`nsinit` is a cli application which demonstrates the use of libcontainer. It is able to spawn new containers or join existing containers, based on the current directory.
++++
++++To use `nsinit`, cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. Environment, networking, and different capabilities for the container are specified in this file. The configuration is used for each process executed inside the container.
++++
++++See the `sample_configs` folder for examples of what the container configuration should look like.
++++
++++To execute `/bin/bash` in the current directory as a container just run the following **as root**:
++++```bash
++++nsinit exec /bin/bash
++++```
++++
++++If you wish to spawn another process inside the container while your current bash session is running, run the same command again to get another bash shell (or change the command). If the original process (PID 1) dies, all other processes spawned inside the container will be killed and the namespace will be removed.
++++
++++You can identify if a process is running in a container by looking to see if `state.json` is in the root of the directory.
++++
++++You may also specify an alternate root place where the `container.json` file is read and where the `state.json` file will be saved.
++++
++++#### Future
++++See the [roadmap](ROADMAP.md).
++++
++++## Copyright and license
++++
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
Docs released under Creative Commons.
++++
++++## Hacking on libcontainer
++++
++++First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md).
++++
++++If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTORS_GUIDE.md).
++++
++++If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and
++++"How can I become a maintainer?" in the Contributors' Guide.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# libcontainer: what's next?
++++
++++This document is a high-level overview of where we want to take libcontainer next.
++++It is a curated selection of planned improvements which are either important, difficult, or both.
++++
++++For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues).
++++
++++To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
++++
++++## Broader kernel support
++++
++++Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you’re deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you’re adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel.
++++
++++## Cross-architecture support
++++
++++Our goal is to make libcontainer run everywhere. However currently libcontainer only runs on x86_64 systems. We plan on expanding architecture support, so that libcontainer containers can be created and used on more architectures.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++/*
++++Temporary API endpoint for libcontainer while the full API is finalized (api.go).
++++*/
++++package libcontainer
++++
++++import (
++++ "github.com/docker/libcontainer/cgroups/fs"
++++ "github.com/docker/libcontainer/cgroups/systemd"
++++ "github.com/docker/libcontainer/network"
++++)
++++
++++// TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that.
++++// DEPRECATED: The below portions are only to be used during the transition to the official API.
++++// Returns all available stats for the given container.
++++func GetStats(container *Config, state *State) (*ContainerStats, error) {
++++ var (
++++ err error
++++ stats = &ContainerStats{}
++++ )
++++
++++ if systemd.UseSystemd() {
++++ stats.CgroupStats, err = systemd.GetStats(container.Cgroups)
++++ } else {
++++ stats.CgroupStats, err = fs.GetStats(container.Cgroups)
++++ }
++++
++++ if err != nil {
++++ return stats, err
++++ }
++++
++++ stats.NetworkStats, err = network.GetStats(&state.NetworkState)
++++
++++ return stats, err
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build apparmor,linux
++++
++++package apparmor
++++
++++// #cgo LDFLAGS: -lapparmor
++++// #include <sys/apparmor.h>
++++// #include <stdlib.h>
++++import "C"
++++import (
++++ "io/ioutil"
++++ "os"
++++ "unsafe"
++++)
++++
// IsEnabled reports whether AppArmor is usable: the securityfs apparmor
// directory exists, we are not ourselves inside a container (the
// "container" environment variable is unset), and the kernel module
// reports itself enabled ("Y").
func IsEnabled() bool {
	_, err := os.Stat("/sys/kernel/security/apparmor")
	if err != nil || os.Getenv("container") != "" {
		return false
	}
	buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
	return err == nil && len(buf) > 1 && buf[0] == 'Y'
}
++++
// ApplyProfile arranges for the AppArmor profile `name` to be applied to
// this process at its next exec (via libapparmor's aa_change_onexec).
// An empty name is a no-op.
//
// NOTE(review): the two-result cgo form derives err from errno after the
// call; aa_change_onexec's integer return value is not inspected here —
// confirm that a stale errno from an otherwise-successful call cannot
// surface as a spurious error.
func ApplyProfile(name string) error {
	if name == "" {
		return nil
	}

	// C string must be freed manually; defer keeps the free next to the alloc.
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	if _, err := C.aa_change_onexec(cName); err != nil {
		return err
	}
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !apparmor !linux
++++
++++package apparmor
++++
// IsEnabled always reports false: this stub is compiled when AppArmor
// support is unavailable (non-Linux, or the `apparmor` build tag unset).
func IsEnabled() bool {
	return false
}
++++
// ApplyProfile is a no-op in builds without AppArmor support; it accepts
// any profile name and reports success.
func ApplyProfile(name string) error {
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package apparmor
++++
++++import (
++++ "io"
++++ "os"
++++ "text/template"
++++)
++++
// data is the template context rendered into baseTemplate.
type data struct {
	Name         string   // profile name, e.g. "docker-default"
	Imports      []string // lines emitted before the profile block
	InnerImports []string // lines emitted inside the profile block
}
++++
// baseTemplate is the text/template source for the default Docker
// AppArmor profile; {{.Imports}} and {{.InnerImports}} are filled in by
// generateProfile depending on what exists under /etc/apparmor.d.
const baseTemplate = `
{{range $value := .Imports}}
{{$value}}
{{end}}

profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
{{range $value := .InnerImports}}
  {{$value}}
{{end}}

  network,
  capability,
  file,
  umount,

  mount fstype=tmpfs,
  mount fstype=mqueue,
  mount fstype=fuse.*,
  mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
  mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
  mount fstype=fusectl -> /sys/fs/fuse/connections/,
  mount fstype=securityfs -> /sys/kernel/security/,
  mount fstype=debugfs -> /sys/kernel/debug/,
  mount fstype=proc -> /proc/,
  mount fstype=sysfs -> /sys/,

  deny @{PROC}/sys/fs/** wklx,
  deny @{PROC}/sysrq-trigger rwklx,
  deny @{PROC}/mem rwklx,
  deny @{PROC}/kmem rwklx,
  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
  deny @{PROC}/sys/kernel/*/** wklx,

  deny mount options=(ro, remount) -> /,
  deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/,
  deny mount fstype=devpts,

  deny /sys/[^f]*/** wklx,
  deny /sys/f[^s]*/** wklx,
  deny /sys/fs/[^c]*/** wklx,
  deny /sys/fs/c[^g]*/** wklx,
  deny /sys/fs/cg[^r]*/** wklx,
  deny /sys/firmware/efi/efivars/** rwklx,
  deny /sys/kernel/security/** rwklx,
}
`
++++
++++func generateProfile(out io.Writer) error {
++++ compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
++++ if err != nil {
++++ return err
++++ }
++++ data := &data{
++++ Name: "docker-default",
++++ }
++++ if tuntablesExists() {
++++ data.Imports = append(data.Imports, "#include <tunables/global>")
++++ } else {
++++ data.Imports = append(data.Imports, "@{PROC}=/proc/")
++++ }
++++ if abstrctionsEsists() {
++++ data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
++++ }
++++ if err := compiled.Execute(out, data); err != nil {
++++ return err
++++ }
++++ return nil
++++}
++++
// tuntablesExists reports whether /etc/apparmor.d/tunables/global exists
// on the host. (The "tuntables" spelling is a long-standing typo; renaming
// it would require touching its callers elsewhere in this file.)
func tuntablesExists() bool {
	_, err := os.Stat("/etc/apparmor.d/tunables/global")
	return err == nil
}
++++
// abstrctionsEsists reports whether /etc/apparmor.d/abstractions/base
// exists on the host. (Name is a typo for "abstractionsExists"; renaming
// it would require touching its callers elsewhere in this file.)
func abstrctionsEsists() bool {
	_, err := os.Stat("/etc/apparmor.d/abstractions/base")
	return err == nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package apparmor
++++
++++import (
++++ "fmt"
++++ "os"
++++ "os/exec"
++++ "path"
++++)
++++
const (
	// DefaultProfilePath is where the generated "docker" AppArmor profile
	// is written before being loaded with apparmor_parser.
	DefaultProfilePath = "/etc/apparmor.d/docker"
)
++++
++++func InstallDefaultProfile() error {
++++ if !IsEnabled() {
++++ return nil
++++ }
++++
++++ // Make sure /etc/apparmor.d exists
++++ if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil {
++++ return err
++++ }
++++
++++ f, err := os.OpenFile(DefaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
++++ if err != nil {
++++ return err
++++ }
++++ if err := generateProfile(f); err != nil {
++++ f.Close()
++++ return err
++++ }
++++ f.Close()
++++
++++ cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
++++ // to use the parser directly we have to make sure we are in the correct
++++ // dir with the profile
++++ cmd.Dir = "/etc/apparmor.d"
++++
++++ output, err := cmd.CombinedOutput()
++++ if err != nil {
++++ return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package cgroups
++++
++++import (
++++ "fmt"
++++
++++ "github.com/docker/libcontainer/devices"
++++)
++++
// FreezerState is the desired state of a cgroup's freezer subsystem.
type FreezerState string

const (
	Undefined FreezerState = ""       // leave the freezer untouched
	Frozen    FreezerState = "FROZEN" // suspend all tasks in the cgroup
	Thawed    FreezerState = "THAWED" // resume all tasks in the cgroup
)
++++
++++type NotFoundError struct {
++++ Subsystem string
++++}
++++
++++func (e *NotFoundError) Error() string {
++++ return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
++++}
++++
++++func NewNotFoundError(sub string) error {
++++ return &NotFoundError{
++++ Subsystem: sub,
++++ }
++++}
++++
++++func IsNotFound(err error) bool {
++++ if err == nil {
++++ return false
++++ }
++++
++++ _, ok := err.(*NotFoundError)
++++ return ok
++++}
++++
// Cgroup is the JSON-serializable specification of a container's cgroup:
// its identity (name and parent), device access policy, and resource
// limits for the memory, cpu, cpuset and freezer subsystems.
type Cgroup struct {
	Name   string `json:"name,omitempty"`
	Parent string `json:"parent,omitempty"` // name of parent cgroup or slice

	AllowAllDevices   bool              `json:"allow_all_devices,omitempty"` // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
	AllowedDevices    []*devices.Device `json:"allowed_devices,omitempty"`
	Memory            int64             `json:"memory,omitempty"`             // Memory limit (in bytes)
	MemoryReservation int64             `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes)
	MemorySwap        int64             `json:"memory_swap,omitempty"`        // Total memory usage (memory + swap); set `-1' to disable swap
	CpuShares         int64             `json:"cpu_shares,omitempty"`         // CPU shares (relative weight vs. other containers)
	CpuQuota          int64             `json:"cpu_quota,omitempty"`          // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
	CpuPeriod         int64             `json:"cpu_period,omitempty"`         // CPU period to be used for hardcapping (in usecs). 0 to use system default.
	CpusetCpus        string            `json:"cpuset_cpus,omitempty"`        // CPU to use
	Freezer           FreezerState      `json:"freezer,omitempty"`            // set the freeze value for the process
	Slice             string            `json:"slice,omitempty"`              // Parent slice to use for systemd
}
++++
// ActiveCgroup is a handle to a cgroup that has been applied to a
// process: it can report its per-subsystem filesystem paths and tear
// itself down.
type ActiveCgroup interface {
	Cleanup() error
	Paths() (map[string]string, error)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package cgroups
++++
++++import (
++++ "bytes"
++++ "testing"
++++)
++++
const (
	// cgroupsContents is a representative snapshot of a process's
	// /proc/self/cgroup file, used as test input below.
	cgroupsContents = `11:hugetlb:/
10:perf_event:/
9:blkio:/
8:net_cls:/
7:freezer:/
6:devices:/
5:memory:/
4:cpuacct,cpu:/
3:cpuset:/
2:name=systemd:/user.slice/user-1000.slice/session-16.scope`
)
++++
++++func TestParseCgroups(t *testing.T) {
++++ r := bytes.NewBuffer([]byte(cgroupsContents))
++++ _, err := ParseCgroupFile("blkio", r)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "encoding/json"
++++ "fmt"
++++ "log"
++++ "os"
++++ "syscall"
++++ "time"
++++
++++ "github.com/codegangsta/cli"
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/cgroups/fs"
++++ "github.com/docker/libcontainer/cgroups/systemd"
++++)
++++
// createCommand places an already-running process (--pid) into a new
// cgroup described by the JSON file named by --config.
var createCommand = cli.Command{
	Name:  "create",
	Usage: "Create a cgroup container using the supplied configuration and initial process.",
	Flags: []cli.Flag{
		cli.StringFlag{Name: "config, c", Value: "cgroup.json", Usage: "path to container configuration (cgroups.Cgroup object)"},
		cli.IntFlag{Name: "pid, p", Value: 0, Usage: "pid of the initial process in the container"},
	},
	Action: createAction,
}

// destroyCommand kills all processes in the named cgroup and removes it.
var destroyCommand = cli.Command{
	Name:  "destroy",
	Usage: "Destroy an existing cgroup container.",
	Flags: []cli.Flag{
		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
	},
	Action: destroyAction,
}

// statsCommand prints the cgroup's resource-usage stats as indented JSON.
var statsCommand = cli.Command{
	Name:  "stats",
	Usage: "Get stats for cgroup",
	Flags: []cli.Flag{
		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
	},
	Action: statsAction,
}

// pauseCommand freezes every task in the named cgroup.
var pauseCommand = cli.Command{
	Name:  "pause",
	Usage: "Pause cgroup",
	Flags: []cli.Flag{
		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
	},
	Action: pauseAction,
}

// resumeCommand thaws a previously frozen cgroup.
var resumeCommand = cli.Command{
	Name:  "resume",
	Usage: "Resume a paused cgroup",
	Flags: []cli.Flag{
		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
	},
	Action: resumeAction,
}

// psCommand lists the pids currently in the named cgroup.
var psCommand = cli.Command{
	Name:  "ps",
	Usage: "Get list of pids for a cgroup",
	Flags: []cli.Flag{
		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
	},
	Action: psAction,
}
++++
++++func getConfigFromFile(c *cli.Context) (*cgroups.Cgroup, error) {
++++ f, err := os.Open(c.String("config"))
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++
++++ var config *cgroups.Cgroup
++++ if err := json.NewDecoder(f).Decode(&config); err != nil {
++++ log.Fatal(err)
++++ }
++++ return config, nil
++++}
++++
++++func openLog(name string) error {
++++ f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
++++ if err != nil {
++++ return err
++++ }
++++
++++ log.SetOutput(f)
++++ return nil
++++}
++++
++++func getConfig(context *cli.Context) (*cgroups.Cgroup, error) {
++++ name := context.String("name")
++++ if name == "" {
++++ log.Fatal(fmt.Errorf("Missing container name"))
++++ }
++++ parent := context.String("parent")
++++ return &cgroups.Cgroup{
++++ Name: name,
++++ Parent: parent,
++++ }, nil
++++}
++++
++++func killAll(config *cgroups.Cgroup) {
++++ // We could use freezer here to prevent process spawning while we are trying
++++ // to kill everything. But going with more portable solution of retrying for
++++ // now.
++++ pids := getPids(config)
++++ retry := 10
++++ for len(pids) != 0 || retry > 0 {
++++ killPids(pids)
++++ time.Sleep(100 * time.Millisecond)
++++ retry--
++++ pids = getPids(config)
++++ }
++++ if len(pids) != 0 {
++++ log.Fatal(fmt.Errorf("Could not kill existing processes in the container."))
++++ }
++++}
++++
// getPids returns the pids currently in the cgroup, aborting the program
// on any cgroupfs error.
func getPids(config *cgroups.Cgroup) []int {
	pids, err := fs.GetPids(config)
	if err != nil {
		log.Fatal(err)
	}
	return pids
}
++++
// killPids sends SIGKILL to each pid in the list. Errors are deliberately
// ignored: processes may exit on their own between listing and killing.
func killPids(pids []int) {
	for i := 0; i < len(pids); i++ {
		_ = syscall.Kill(pids[i], syscall.SIGKILL)
	}
}
++++
++++func setFreezerState(context *cli.Context, state cgroups.FreezerState) {
++++ config, err := getConfig(context)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ if systemd.UseSystemd() {
++++ err = systemd.Freeze(config, state)
++++ } else {
++++ err = fs.Freeze(config, state)
++++ }
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++}
++++
++++func createAction(context *cli.Context) {
++++ config, err := getConfigFromFile(context)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ pid := context.Int("pid")
++++ if pid <= 0 {
++++ log.Fatal(fmt.Errorf("Invalid pid : %d", pid))
++++ }
++++ if systemd.UseSystemd() {
++++ _, err := systemd.Apply(config, pid)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ } else {
++++ _, err := fs.Apply(config, pid)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ }
++++}
++++
++++func destroyAction(context *cli.Context) {
++++ config, err := getConfig(context)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ killAll(config)
++++ // Systemd will clean up cgroup state for empty container.
++++ if !systemd.UseSystemd() {
++++ err := fs.Cleanup(config)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ }
++++}
++++
++++func statsAction(context *cli.Context) {
++++ config, err := getConfig(context)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ stats, err := fs.GetStats(config)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ out, err := json.MarshalIndent(stats, "", "\t")
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out))
++++}
++++
// pauseAction freezes every task in the cgroup named by the CLI flags.
func pauseAction(context *cli.Context) {
	setFreezerState(context, cgroups.Frozen)
}
++++
// resumeAction thaws a previously frozen cgroup named by the CLI flags.
func resumeAction(context *cli.Context) {
	setFreezerState(context, cgroups.Thawed)
}
++++
// psAction prints the pids currently contained in the configured cgroup.
// Reading always goes through the fs backend.
func psAction(context *cli.Context) {
	config, err := getConfig(context)
	if err != nil {
		log.Fatal(err)
	}

	pids, err := fs.GetPids(config)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Pids in '%s':\n", config.Name)
	fmt.Println(pids)
}
++++
++++func main() {
++++ logPath := os.Getenv("log")
++++ if logPath != "" {
++++ if err := openLog(logPath); err != nil {
++++ log.Fatal(err)
++++ }
++++ }
++++
++++ app := cli.NewApp()
++++ app.Name = "cgutil"
++++ app.Usage = "Test utility for libcontainer cgroups package"
++++ app.Version = "0.1"
++++
++++ app.Commands = []cli.Command{
++++ createCommand,
++++ destroyCommand,
++++ statsCommand,
++++ pauseCommand,
++++ resumeCommand,
++++ psCommand,
++++ }
++++
++++ if err := app.Run(os.Args); err != nil {
++++ log.Fatal(err)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++{
++++ "name": "luke",
++++ "parent": "darth",
++++ "allow_all_devices": true,
++++ "memory": 1073741824,
++++ "memory_swap": -1,
++++ "cpu_shares": 2048,
++++ "cpu_quota": 500000,
++++ "cpu_period": 250000
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
var (
	// subsystems maps each supported cgroup subsystem name to the
	// implementation used to apply settings, collect stats and remove it.
	subsystems = map[string]subsystem{
		"devices":    &DevicesGroup{},
		"memory":     &MemoryGroup{},
		"cpu":        &CpuGroup{},
		"cpuset":     &CpusetGroup{},
		"cpuacct":    &CpuacctGroup{},
		"blkio":      &BlkioGroup{},
		"perf_event": &PerfEventGroup{},
		"freezer":    &FreezerGroup{},
	}
	// CgroupProcesses is the per-cgroup file that lists (and accepts) member pids.
	CgroupProcesses = "cgroup.procs"
)

// The absolute path to the root of the cgroup hierarchies.
// Left empty when no cgroup mount could be found at init time.
var cgroupRoot string
++++
++++// TODO(vmarmol): Report error here, we'll probably need to wait for the new API.
++++func init() {
++++ // we can pick any subsystem to find the root
++++ cpuRoot, err := cgroups.FindCgroupMountpoint("cpu")
++++ if err != nil {
++++ return
++++ }
++++ cgroupRoot = filepath.Dir(cpuRoot)
++++
++++ if _, err := os.Stat(cgroupRoot); err != nil {
++++ return
++++ }
++++}
++++
// subsystem is the per-hierarchy backend interface implemented by each
// cgroup controller (cpu, memory, blkio, ...).
type subsystem interface {
	// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
	GetStats(path string, stats *cgroups.Stats) error
	// Removes the cgroup represented by 'data'.
	Remove(*data) error
	// Creates and joins the cgroup represented by data.
	Set(*data) error
}

// data carries everything a subsystem needs to locate and manage one cgroup.
type data struct {
	// root is the absolute path of the cgroup mount root.
	root   string
	// cgroup is this cgroup's name/path (absolute, or relative to the init
	// process's cgroup).
	cgroup string
	// c is the user-supplied cgroup configuration.
	c      *cgroups.Cgroup
	// pid is the process to place in the cgroup (0 when only resolving paths).
	pid    int
}
++++
// Apply creates/joins the cgroup for every supported subsystem and places
// pid in each. On the first failure any partially-created state is cleaned
// up (best effort) and the error is returned. Note that map iteration order
// is unspecified, so subsystems are applied in no particular order.
func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
	d, err := getCgroupData(c, pid)
	if err != nil {
		return nil, err
	}

	for _, sys := range subsystems {
		if err := sys.Set(d); err != nil {
			// Roll back whatever was already created before reporting failure.
			d.Cleanup()
			return nil, err
		}
	}

	return d, nil
}
++++
++++func Cleanup(c *cgroups.Cgroup) error {
++++ d, err := getCgroupData(c, 0)
++++ if err != nil {
++++ return fmt.Errorf("Could not get Cgroup data %s", err)
++++ }
++++ return d.Cleanup()
++++}
++++
++++func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
++++ stats := cgroups.NewStats()
++++
++++ d, err := getCgroupData(c, 0)
++++ if err != nil {
++++ return nil, fmt.Errorf("getting CgroupData %s", err)
++++ }
++++
++++ for sysname, sys := range subsystems {
++++ path, err := d.path(sysname)
++++ if err != nil {
++++ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
++++ if cgroups.IsNotFound(err) {
++++ continue
++++ }
++++
++++ return nil, err
++++ }
++++
++++ if err := sys.GetStats(path, stats); err != nil {
++++ return nil, err
++++ }
++++ }
++++
++++ return stats, nil
++++}
++++
++++// Freeze toggles the container's freezer cgroup depending on the state
++++// provided
++++func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
++++ d, err := getCgroupData(c, 0)
++++ if err != nil {
++++ return err
++++ }
++++
++++ c.Freezer = state
++++
++++ freezer := subsystems["freezer"]
++++
++++ return freezer.Set(d)
++++}
++++
++++func GetPids(c *cgroups.Cgroup) ([]int, error) {
++++ d, err := getCgroupData(c, 0)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ dir, err := d.path("devices")
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ return cgroups.ReadProcsFile(dir)
++++}
++++
++++func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) {
++++ if cgroupRoot == "" {
++++ return nil, fmt.Errorf("failed to find the cgroup root")
++++ }
++++
++++ cgroup := c.Name
++++ if c.Parent != "" {
++++ cgroup = filepath.Join(c.Parent, cgroup)
++++ }
++++
++++ return &data{
++++ root: cgroupRoot,
++++ cgroup: cgroup,
++++ c: c,
++++ pid: pid,
++++ }, nil
++++}
++++
// parent returns the directory for subsystem corresponding to the init
// process's cgroup — the base under which relative cgroup names are nested.
func (raw *data) parent(subsystem string) (string, error) {
	initPath, err := cgroups.GetInitCgroupDir(subsystem)
	if err != nil {
		return "", err
	}
	return filepath.Join(raw.root, subsystem, initPath), nil
}
++++
++++func (raw *data) Paths() (map[string]string, error) {
++++ paths := make(map[string]string)
++++
++++ for sysname := range subsystems {
++++ path, err := raw.path(sysname)
++++ if err != nil {
++++ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
++++ if cgroups.IsNotFound(err) {
++++ continue
++++ }
++++
++++ return nil, err
++++ }
++++
++++ paths[sysname] = path
++++ }
++++
++++ return paths, nil
++++}
++++
++++func (raw *data) path(subsystem string) (string, error) {
++++ // If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
++++ if filepath.IsAbs(raw.cgroup) {
++++ path := filepath.Join(raw.root, subsystem, raw.cgroup)
++++
++++ if _, err := os.Stat(path); err != nil {
++++ if os.IsNotExist(err) {
++++ return "", cgroups.NewNotFoundError(subsystem)
++++ }
++++
++++ return "", err
++++ }
++++
++++ return path, nil
++++ }
++++
++++ parent, err := raw.parent(subsystem)
++++ if err != nil {
++++ return "", err
++++ }
++++
++++ return filepath.Join(parent, raw.cgroup), nil
++++}
++++
++++func (raw *data) join(subsystem string) (string, error) {
++++ path, err := raw.path(subsystem)
++++ if err != nil {
++++ return "", err
++++ }
++++ if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
++++ return "", err
++++ }
++++ if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil {
++++ return "", err
++++ }
++++ return path, nil
++++}
++++
// Cleanup removes this cgroup from every subsystem hierarchy. Removal is
// best effort: per-subsystem errors are deliberately ignored and nil is
// always returned.
func (raw *data) Cleanup() error {
	for _, sys := range subsystems {
		sys.Remove(raw)
	}
	return nil
}
++++
// writeFile writes data to dir/file, creating the file with mode 0700 when
// it does not already exist.
func writeFile(dir, file, data string) error {
	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
}

// readFile returns the contents of dir/file as a string.
func readFile(dir, file string) (string, error) {
	data, err := ioutil.ReadFile(filepath.Join(dir, file))
	return string(data), err
}
++++
++++func removePath(p string, err error) error {
++++ if err != nil {
++++ return err
++++ }
++++ if p != "" {
++++ return os.RemoveAll(p)
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "bufio"
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++ "strings"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// BlkioGroup manages the "blkio" (block I/O) cgroup subsystem.
type BlkioGroup struct {
}

// Set joins the blkio cgroup; no tunables are written. A missing blkio
// hierarchy is not treated as an error.
func (s *BlkioGroup) Set(d *data) error {
	// we just want to join this group even though we don't set anything
	if _, err := d.join("blkio"); err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	return nil
}
++++
// Remove deletes this cgroup's blkio directory.
func (s *BlkioGroup) Remove(d *data) error {
	return removePath(d.path("blkio"))
}
++++
++++/*
++++examples:
++++
++++ blkio.sectors
++++ 8:0 6792
++++
++++ blkio.io_service_bytes
++++ 8:0 Read 1282048
++++ 8:0 Write 2195456
++++ 8:0 Sync 2195456
++++ 8:0 Async 1282048
++++ 8:0 Total 3477504
++++ Total 3477504
++++
++++ blkio.io_serviced
++++ 8:0 Read 124
++++ 8:0 Write 104
++++ 8:0 Sync 104
++++ 8:0 Async 124
++++ 8:0 Total 228
++++ Total 228
++++
++++ blkio.io_queued
++++ 8:0 Read 0
++++ 8:0 Write 0
++++ 8:0 Sync 0
++++ 8:0 Async 0
++++ 8:0 Total 0
++++ Total 0
++++*/
++++
// splitBlkioStatLine reports whether r separates fields in a blkio stat
// line; entries look like "8:0 Read 100", so both ':' and ' ' split.
func splitBlkioStatLine(r rune) bool {
	switch r {
	case ' ', ':':
		return true
	}
	return false
}
++++
++++func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
++++ var blkioStats []cgroups.BlkioStatEntry
++++ f, err := os.Open(path)
++++ if err != nil {
++++ if os.IsNotExist(err) {
++++ return blkioStats, nil
++++ }
++++ return nil, err
++++ }
++++ defer f.Close()
++++
++++ sc := bufio.NewScanner(f)
++++ for sc.Scan() {
++++ // format: dev type amount
++++ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
++++ if len(fields) < 3 {
++++ if len(fields) == 2 && fields[0] == "Total" {
++++ // skip total line
++++ continue
++++ } else {
++++ return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
++++ }
++++ }
++++
++++ v, err := strconv.ParseUint(fields[0], 10, 64)
++++ if err != nil {
++++ return nil, err
++++ }
++++ major := v
++++
++++ v, err = strconv.ParseUint(fields[1], 10, 64)
++++ if err != nil {
++++ return nil, err
++++ }
++++ minor := v
++++
++++ op := ""
++++ valueField := 2
++++ if len(fields) == 4 {
++++ op = fields[2]
++++ valueField = 3
++++ }
++++ v, err = strconv.ParseUint(fields[valueField], 10, 64)
++++ if err != nil {
++++ return nil, err
++++ }
++++ blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
++++ }
++++
++++ return blkioStats, nil
++++}
++++
// GetStats fills stats from this cgroup's blkio files. CFQ-scheduler
// ("recursive") stats are used when the io_serviced_recursive file exists
// and is non-empty; otherwise the throttle counters serve as a fallback.
func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
	// Try to read CFQ stats available on all CFQ enabled kernels first
	if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
		return getCFQStats(path, stats)
	}
	return getStats(path, stats) // Use generic stats as fallback
}
++++
++++func getCFQStats(path string, stats *cgroups.Stats) error {
++++ var blkioStats []cgroups.BlkioStatEntry
++++ var err error
++++
++++ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
++++ return err
++++ }
++++ stats.BlkioStats.SectorsRecursive = blkioStats
++++
++++ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
++++ return err
++++ }
++++ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
++++
++++ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
++++ return err
++++ }
++++ stats.BlkioStats.IoServicedRecursive = blkioStats
++++
++++ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
++++ return err
++++ }
++++ stats.BlkioStats.IoQueuedRecursive = blkioStats
++++
++++ return nil
++++}
++++
++++func getStats(path string, stats *cgroups.Stats) error {
++++ var blkioStats []cgroups.BlkioStatEntry
++++ var err error
++++
++++ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
++++ return err
++++ }
++++ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
++++
++++ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
++++ return err
++++ }
++++ stats.BlkioStats.IoServicedRecursive = blkioStats
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "testing"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// Canned cgroup file contents used as fixtures by the blkio stats tests below.
const (
	sectorsRecursiveContents = `8:0 1024`
	serviceBytesRecursiveContents = `8:0 Read 100
8:0 Write 200
8:0 Sync 300
8:0 Async 500
8:0 Total 500
Total 500`
	servicedRecursiveContents = `8:0 Read 10
8:0 Write 40
8:0 Sync 20
8:0 Async 30
8:0 Total 50
Total 50`
	queuedRecursiveContents = `8:0 Read 1
8:0 Write 4
8:0 Sync 2
8:0 Async 3
8:0 Total 5
Total 5`
	throttleServiceBytes = `8:0 Read 11030528
8:0 Write 23
8:0 Sync 42
8:0 Async 11030528
8:0 Total 11030528
252:0 Read 11030528
252:0 Write 23
252:0 Sync 42
252:0 Async 11030528
252:0 Total 11030528
Total 22061056`
	throttleServiced = `8:0 Read 164
8:0 Write 23
8:0 Sync 42
8:0 Async 164
8:0 Total 164
252:0 Read 164
252:0 Write 23
252:0 Sync 42
252:0 Async 164
252:0 Total 164
Total 328`
)

// appendBlkioStatEntry appends one expected BlkioStatEntry to the given slice.
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
	*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
}
++++
// TestBlkioStats verifies that all four CFQ recursive stat files are parsed
// into the expected BlkioStats entries.
func TestBlkioStats(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatal(err)
	}

	// Verify expected stats.
	expectedStats := cgroups.BlkioStats{}
	appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")

	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")

	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")

	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")

	expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
++++
// TestBlkioStatsNoSectorsFile checks that a missing sectors file is tolerated.
func TestBlkioStatsNoSectorsFile(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

// TestBlkioStatsNoServiceBytesFile checks that a missing service-bytes file is tolerated.
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_serviced_recursive": servicedRecursiveContents,
		"blkio.io_queued_recursive":   queuedRecursiveContents,
		"blkio.sectors_recursive":     sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

// TestBlkioStatsNoServicedFile checks that a missing serviced file is tolerated.
func TestBlkioStatsNoServicedFile(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

// TestBlkioStatsNoQueuedFile checks that a missing queued file is tolerated.
func TestBlkioStatsNoQueuedFile(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}
++++
// TestBlkioStatsUnexpectedNumberOfFields expects a parse error for a stat
// line with too many fields.
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err == nil {
		t.Fatal("Expected to fail, but did not")
	}
}

// TestBlkioStatsUnexpectedFieldType expects a parse error when the value
// column is not numeric.
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": "8:0 Read Write",
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err == nil {
		t.Fatal("Expected to fail, but did not")
	}
}
++++
// TestNonCFQBlkioStats verifies the throttle-counter fallback path: with
// empty recursive files, stats must come from blkio.throttle.* instead.
func TestNonCFQBlkioStats(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": "",
		"blkio.io_serviced_recursive":      "",
		"blkio.io_queued_recursive":        "",
		"blkio.sectors_recursive":          "",
		"blkio.throttle.io_service_bytes":  throttleServiceBytes,
		"blkio.throttle.io_serviced":       throttleServiced,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatal(err)
	}

	// Verify expected stats.
	expectedStats := cgroups.BlkioStats{}

	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total")

	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total")

	expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "bufio"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
++++type CpuGroup struct {
++++}
++++
++++func (s *CpuGroup) Set(d *data) error {
++++ // We always want to join the cpu group, to allow fair cpu scheduling
++++ // on a container basis
++++ dir, err := d.join("cpu")
++++ if err != nil {
++++ return err
++++ }
++++ if d.c.CpuShares != 0 {
++++ if err := writeFile(dir, "cpu.shares", strconv.FormatInt(d.c.CpuShares, 10)); err != nil {
++++ return err
++++ }
++++ }
++++ if d.c.CpuPeriod != 0 {
++++ if err := writeFile(dir, "cpu.cfs_period_us", strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil {
++++ return err
++++ }
++++ }
++++ if d.c.CpuQuota != 0 {
++++ if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil {
++++ return err
++++ }
++++ }
++++ return nil
++++}
++++
// Remove deletes this cgroup's cpu directory.
func (s *CpuGroup) Remove(d *data) error {
	return removePath(d.path("cpu"))
}
++++
++++func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
++++ f, err := os.Open(filepath.Join(path, "cpu.stat"))
++++ if err != nil {
++++ if os.IsNotExist(err) {
++++ return nil
++++ }
++++ return err
++++ }
++++ defer f.Close()
++++
++++ sc := bufio.NewScanner(f)
++++ for sc.Scan() {
++++ t, v, err := getCgroupParamKeyValue(sc.Text())
++++ if err != nil {
++++ return err
++++ }
++++ switch t {
++++ case "nr_periods":
++++ stats.CpuStats.ThrottlingData.Periods = v
++++
++++ case "nr_throttled":
++++ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
++++
++++ case "throttled_time":
++++ stats.CpuStats.ThrottlingData.ThrottledTime = v
++++ }
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "fmt"
++++ "testing"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// TestCpuStats verifies that nr_periods/nr_throttled/throttled_time from
// cpu.stat end up in the ThrottlingData fields.
func TestCpuStats(t *testing.T) {
	helper := NewCgroupTestUtil("cpu", t)
	defer helper.cleanup()

	const (
		kNrPeriods   = 2000
		kNrThrottled = 200
		// max uint64, to exercise full-range parsing
		kThrottledTime = uint64(18446744073709551615)
	)

	cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n",
		kNrPeriods, kNrThrottled, kThrottledTime)
	helper.writeFileContents(map[string]string{
		"cpu.stat": cpuStatContent,
	})

	cpu := &CpuGroup{}
	actualStats := *cgroups.NewStats()
	err := cpu.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatal(err)
	}

	expectedStats := cgroups.ThrottlingData{
		Periods:          kNrPeriods,
		ThrottledPeriods: kNrThrottled,
		ThrottledTime:    kThrottledTime}

	expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData)
}

// TestNoCpuStatFile checks that a missing cpu.stat file is not an error.
func TestNoCpuStatFile(t *testing.T) {
	helper := NewCgroupTestUtil("cpu", t)
	defer helper.cleanup()

	cpu := &CpuGroup{}
	actualStats := *cgroups.NewStats()
	err := cpu.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatal("Expected not to fail, but did")
	}
}

// TestInvalidCpuStat expects a parse error for a non-numeric value field.
func TestInvalidCpuStat(t *testing.T) {
	helper := NewCgroupTestUtil("cpu", t)
	defer helper.cleanup()
	cpuStatContent := `nr_periods 2000
	nr_throttled 200
	throttled_time fortytwo`
	helper.writeFileContents(map[string]string{
		"cpu.stat": cpuStatContent,
	})

	cpu := &CpuGroup{}
	actualStats := *cgroups.NewStats()
	err := cpu.GetStats(helper.CgroupPath, &actualStats)
	if err == nil {
		t.Fatal("Expected failed stat parsing.")
	}
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "fmt"
++++ "io/ioutil"
++++ "path/filepath"
++++ "strconv"
++++ "strings"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/system"
++++)
++++
const (
	// cgroupCpuacctStat is the per-cgroup file with "user"/"system" tick counts.
	cgroupCpuacctStat = "cpuacct.stat"
	// nanosecondsInSecond converts ticks-per-second rates into nanoseconds.
	nanosecondsInSecond = 1000000000
)

// clockTicks caches the system clock tick rate used to convert cpuacct tick
// counters into nanoseconds.
var clockTicks = uint64(system.GetClockTicks())

// CpuacctGroup manages the "cpuacct" (CPU accounting) cgroup subsystem.
type CpuacctGroup struct {
}
++++
// Set joins the cpuacct cgroup so CPU accounting is collected; no tunables
// are written. A missing cpuacct hierarchy is not treated as an error.
func (s *CpuacctGroup) Set(d *data) error {
	// we just want to join this group even though we don't set anything
	if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	return nil
}

// Remove deletes this cgroup's cpuacct directory.
func (s *CpuacctGroup) Remove(d *data) error {
	return removePath(d.path("cpuacct"))
}
++++
++++func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
++++ userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
++++ if err != nil {
++++ return err
++++ }
++++
++++ totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
++++ if err != nil {
++++ return err
++++ }
++++
++++ percpuUsage, err := getPercpuUsage(path)
++++ if err != nil {
++++ return err
++++ }
++++
++++ stats.CpuStats.CpuUsage.TotalUsage = totalUsage
++++ stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
++++ stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
++++ stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
++++ return nil
++++}
++++
++++// Returns user and kernel usage breakdown in nanoseconds.
++++func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
++++ userModeUsage := uint64(0)
++++ kernelModeUsage := uint64(0)
++++ const (
++++ userField = "user"
++++ systemField = "system"
++++ )
++++
++++ // Expected format:
++++ // user <usage in ticks>
++++ // system <usage in ticks>
++++ data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
++++ if err != nil {
++++ return 0, 0, err
++++ }
++++ fields := strings.Fields(string(data))
++++ if len(fields) != 4 {
++++ return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
++++ }
++++ if fields[0] != userField {
++++ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
++++ }
++++ if fields[2] != systemField {
++++ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
++++ }
++++ if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
++++ return 0, 0, err
++++ }
++++ if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
++++ return 0, 0, err
++++ }
++++
++++ return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
++++}
++++
++++func getPercpuUsage(path string) ([]uint64, error) {
++++ percpuUsage := []uint64{}
++++ data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
++++ if err != nil {
++++ return percpuUsage, err
++++ }
++++ for _, value := range strings.Fields(string(data)) {
++++ value, err := strconv.ParseUint(value, 10, 64)
++++ if err != nil {
++++ return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
++++ }
++++ percpuUsage = append(percpuUsage, value)
++++ }
++++ return percpuUsage, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "bytes"
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
++++type CpusetGroup struct {
++++}
++++
++++func (s *CpusetGroup) Set(d *data) error {
++++ // we don't want to join this cgroup unless it is specified
++++ if d.c.CpusetCpus != "" {
++++ dir, err := d.path("cpuset")
++++ if err != nil {
++++ return err
++++ }
++++
++++ return s.SetDir(dir, d.c.CpusetCpus, d.pid)
++++ }
++++
++++ return nil
++++}
++++
// Remove deletes this cgroup's cpuset directory.
func (s *CpusetGroup) Remove(d *data) error {
	return removePath(d.path("cpuset"))
}

// GetStats is a no-op: the cpuset subsystem exposes no statistics here.
func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
++++
// SetDir creates dir (including any missing ancestors, seeding their
// cpuset.cpus/cpuset.mems from their parents), joins pid into it, then
// writes the requested cpuset.cpus value.
func (s *CpusetGroup) SetDir(dir, value string, pid int) error {
	if err := s.ensureParent(dir); err != nil {
		return err
	}

	// because we are not using d.join we need to place the pid into the procs file
	// unlike the other subsystems
	// NOTE(review): this literal duplicates the CgroupProcesses constant.
	if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
		return err
	}

	if err := writeFile(dir, "cpuset.cpus", value); err != nil {
		return err
	}

	return nil
}
++++
// getSubsystemSettings reads the raw cpuset.cpus and cpuset.mems contents of
// the cgroup directory parent. Named returns mean that on error whatever was
// read so far is returned alongside err.
func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
	if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
		return
	}
	if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
		return
	}
	return cpus, mems, nil
}
++++
// ensureParent recursively creates any missing ancestors of current (walking
// up until an existing directory is found), creates current itself, and then
// seeds current's cpuset.cpus/cpuset.mems from its parent when they are empty.
func (s *CpusetGroup) ensureParent(current string) error {
	parent := filepath.Dir(current)

	if _, err := os.Stat(parent); err != nil {
		if !os.IsNotExist(err) {
			return err
		}

		// Parent is missing too: build it (and its ancestors) first.
		if err := s.ensureParent(parent); err != nil {
			return err
		}
	}

	if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) {
		return err
	}
	return s.copyIfNeeded(current, parent)
}
++++
// copyIfNeeded copies the cpuset.cpus and cpuset.mems values from the parent
// directory into current whenever current's file is empty (contains only
// newlines) — presumably because an unpopulated cpuset cannot host tasks;
// TODO(review): confirm against kernel cpuset semantics.
func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
	var (
		err                      error
		currentCpus, currentMems []byte
		parentCpus, parentMems   []byte
	)

	if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
		return err
	}
	if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
		return err
	}

	if s.isEmpty(currentCpus) {
		if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
			return err
		}
	}
	if s.isEmpty(currentMems) {
		if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
			return err
		}
	}
	return nil
}
++++
// isEmpty reports whether b is empty once leading/trailing newlines are
// trimmed (i.e. the cgroup file carried no real value).
func (s *CpusetGroup) isEmpty(b []byte) bool {
	return len(bytes.Trim(b, "\n")) == 0
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import "github.com/docker/libcontainer/cgroups"
++++
++++type DevicesGroup struct {
++++}
++++
++++func (s *DevicesGroup) Set(d *data) error {
++++ dir, err := d.join("devices")
++++ if err != nil {
++++ return err
++++ }
++++
++++ if !d.c.AllowAllDevices {
++++ if err := writeFile(dir, "devices.deny", "a"); err != nil {
++++ return err
++++ }
++++
++++ for _, dev := range d.c.AllowedDevices {
++++ if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil {
++++ return err
++++ }
++++ }
++++ }
++++ return nil
++++}
++++
// Remove deletes this cgroup's devices directory.
func (s *DevicesGroup) Remove(d *data) error {
	return removePath(d.path("devices"))
}

// GetStats is a no-op: the devices subsystem exposes no statistics here.
func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "strings"
++++ "time"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// FreezerGroup manages the "freezer" cgroup subsystem, used to pause and
// resume all tasks in a container.
type FreezerGroup struct {
}
++++
++++func (s *FreezerGroup) Set(d *data) error {
++++ switch d.c.Freezer {
++++ case cgroups.Frozen, cgroups.Thawed:
++++ dir, err := d.path("freezer")
++++ if err != nil {
++++ return err
++++ }
++++
++++ if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil {
++++ return err
++++ }
++++
++++ for {
++++ state, err := readFile(dir, "freezer.state")
++++ if err != nil {
++++ return err
++++ }
++++ if strings.TrimSpace(state) == string(d.c.Freezer) {
++++ break
++++ }
++++ time.Sleep(1 * time.Millisecond)
++++ }
++++ default:
++++ if _, err := d.join("freezer"); err != nil && !cgroups.IsNotFound(err) {
++++ return err
++++ }
++++ }
++++
++++ return nil
++++}
++++
// Remove deletes the freezer cgroup directory for d.
func (s *FreezerGroup) Remove(d *data) error {
	return removePath(d.path("freezer"))
}
++++
// GetStats is a no-op: the freezer subsystem exposes no statistics.
func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "bufio"
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// MemoryGroup applies memory limits and reads statistics via the "memory"
// cgroup subsystem.
type MemoryGroup struct {
}
++++
++++func (s *MemoryGroup) Set(d *data) error {
++++ dir, err := d.join("memory")
++++ // only return an error for memory if it was specified
++++ if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) {
++++ return err
++++ }
++++ defer func() {
++++ if err != nil {
++++ os.RemoveAll(dir)
++++ }
++++ }()
++++
++++ // Only set values if some config was specified.
++++ if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 {
++++ if d.c.Memory != 0 {
++++ if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil {
++++ return err
++++ }
++++ }
++++ if d.c.MemoryReservation != 0 {
++++ if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil {
++++ return err
++++ }
++++ }
++++ // By default, MemorySwap is set to twice the size of RAM.
++++ // If you want to omit MemorySwap, set it to `-1'.
++++ if d.c.MemorySwap != -1 {
++++ if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(d.c.Memory*2, 10)); err != nil {
++++ return err
++++ }
++++ }
++++ }
++++ return nil
++++}
++++
// Remove deletes the memory cgroup directory for d.
func (s *MemoryGroup) Remove(d *data) error {
	return removePath(d.path("memory"))
}
++++
++++func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
++++ // Set stats from memory.stat.
++++ statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
++++ if err != nil {
++++ if os.IsNotExist(err) {
++++ return nil
++++ }
++++ return err
++++ }
++++ defer statsFile.Close()
++++
++++ sc := bufio.NewScanner(statsFile)
++++ for sc.Scan() {
++++ t, v, err := getCgroupParamKeyValue(sc.Text())
++++ if err != nil {
++++ return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
++++ }
++++ stats.MemoryStats.Stats[t] = v
++++ }
++++
++++ // Set memory usage and max historical usage.
++++ value, err := getCgroupParamUint(path, "memory.usage_in_bytes")
++++ if err != nil {
++++ return fmt.Errorf("failed to parse memory.usage_in_bytes - %v", err)
++++ }
++++ stats.MemoryStats.Usage = value
++++ value, err = getCgroupParamUint(path, "memory.max_usage_in_bytes")
++++ if err != nil {
++++ return fmt.Errorf("failed to parse memory.max_usage_in_bytes - %v", err)
++++ }
++++ stats.MemoryStats.MaxUsage = value
++++ value, err = getCgroupParamUint(path, "memory.failcnt")
++++ if err != nil {
++++ return fmt.Errorf("failed to parse memory.failcnt - %v", err)
++++ }
++++ stats.MemoryStats.Failcnt = value
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "testing"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// Canned cgroup file contents used by the memory stats tests below.
const (
	// memory.stat: one "<key> <value>" pair per line.
	memoryStatContents = `cache 512
rss 1024`
	memoryUsageContents    = "2048\n" // memory.usage_in_bytes
	memoryMaxUsageContents = "4096\n" // memory.max_usage_in_bytes
	memoryFailcnt          = "100\n"  // memory.failcnt
)
++++
++++func TestMemoryStats(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.stat": memoryStatContents,
++++ "memory.usage_in_bytes": memoryUsageContents,
++++ "memory.max_usage_in_bytes": memoryMaxUsageContents,
++++ "memory.failcnt": memoryFailcnt,
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
++++ expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
++++}
++++
++++func TestMemoryStatsNoStatFile(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.usage_in_bytes": memoryUsageContents,
++++ "memory.max_usage_in_bytes": memoryMaxUsageContents,
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++}
++++
++++func TestMemoryStatsNoUsageFile(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.stat": memoryStatContents,
++++ "memory.max_usage_in_bytes": memoryMaxUsageContents,
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err == nil {
++++ t.Fatal("Expected failure")
++++ }
++++}
++++
++++func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.stat": memoryStatContents,
++++ "memory.usage_in_bytes": memoryUsageContents,
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err == nil {
++++ t.Fatal("Expected failure")
++++ }
++++}
++++
++++func TestMemoryStatsBadStatFile(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.stat": "rss rss",
++++ "memory.usage_in_bytes": memoryUsageContents,
++++ "memory.max_usage_in_bytes": memoryMaxUsageContents,
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err == nil {
++++ t.Fatal("Expected failure")
++++ }
++++}
++++
++++func TestMemoryStatsBadUsageFile(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.stat": memoryStatContents,
++++ "memory.usage_in_bytes": "bad",
++++ "memory.max_usage_in_bytes": memoryMaxUsageContents,
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err == nil {
++++ t.Fatal("Expected failure")
++++ }
++++}
++++
++++func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++ helper.writeFileContents(map[string]string{
++++ "memory.stat": memoryStatContents,
++++ "memory.usage_in_bytes": memoryUsageContents,
++++ "memory.max_usage_in_bytes": "bad",
++++ })
++++
++++ memory := &MemoryGroup{}
++++ actualStats := *cgroups.NewStats()
++++ err := memory.GetStats(helper.CgroupPath, &actualStats)
++++ if err == nil {
++++ t.Fatal("Expected failure")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package fs
++++
++++import (
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// NotifyOnOOM sends signals on the returned channel when the cgroup reaches
// its memory limit. The channel is closed when the cgroup is removed.
func NotifyOnOOM(c *cgroups.Cgroup) (<-chan struct{}, error) {
	// NOTE(review): the second argument appears to be a pid; 0 presumably
	// means no task needs joining since only paths are required — confirm
	// against getCgroupData.
	d, err := getCgroupData(c, 0)
	if err != nil {
		return nil, err
	}

	return notifyOnOOM(d)
}
++++
++++func notifyOnOOM(d *data) (<-chan struct{}, error) {
++++ dir, err := d.path("memory")
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
++++ if syserr != 0 {
++++ return nil, syserr
++++ }
++++
++++ eventfd := os.NewFile(fd, "eventfd")
++++
++++ oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
++++ if err != nil {
++++ eventfd.Close()
++++ return nil, err
++++ }
++++
++++ var (
++++ eventControlPath = filepath.Join(dir, "cgroup.event_control")
++++ data = fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
++++ )
++++
++++ if err := writeFile(dir, "cgroup.event_control", data); err != nil {
++++ eventfd.Close()
++++ oomControl.Close()
++++ return nil, err
++++ }
++++
++++ ch := make(chan struct{})
++++
++++ go func() {
++++ defer func() {
++++ close(ch)
++++ eventfd.Close()
++++ oomControl.Close()
++++ }()
++++
++++ buf := make([]byte, 8)
++++
++++ for {
++++ if _, err := eventfd.Read(buf); err != nil {
++++ return
++++ }
++++
++++ // When a cgroup is destroyed, an event is sent to eventfd.
++++ // So if the control path is gone, return instead of notifying.
++++ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
++++ return
++++ }
++++
++++ ch <- struct{}{}
++++ }
++++ }()
++++
++++ return ch, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package fs
++++
++++import (
++++ "encoding/binary"
++++ "fmt"
++++ "syscall"
++++ "testing"
++++ "time"
++++)
++++
++++func TestNotifyOnOOM(t *testing.T) {
++++ helper := NewCgroupTestUtil("memory", t)
++++ defer helper.cleanup()
++++
++++ helper.writeFileContents(map[string]string{
++++ "memory.oom_control": "",
++++ "cgroup.event_control": "",
++++ })
++++
++++ var eventFd, oomControlFd int
++++
++++ ooms, err := notifyOnOOM(helper.CgroupData)
++++ if err != nil {
++++ t.Fatal("expected no error, got:", err)
++++ }
++++
++++ memoryPath, _ := helper.CgroupData.path("memory")
++++ data, err := readFile(memoryPath, "cgroup.event_control")
++++ if err != nil {
++++ t.Fatal("couldn't read event control file:", err)
++++ }
++++
++++ if _, err := fmt.Sscanf(data, "%d %d", &eventFd, &oomControlFd); err != nil {
++++ t.Fatalf("invalid control data %q: %s", data, err)
++++ }
++++
++++ // re-open the eventfd
++++ efd, err := syscall.Dup(eventFd)
++++ if err != nil {
++++ t.Fatal("unable to reopen eventfd:", err)
++++ }
++++ defer syscall.Close(efd)
++++
++++ if err != nil {
++++ t.Fatal("unable to dup event fd:", err)
++++ }
++++
++++ buf := make([]byte, 8)
++++ binary.LittleEndian.PutUint64(buf, 1)
++++
++++ if _, err := syscall.Write(efd, buf); err != nil {
++++ t.Fatal("unable to write to eventfd:", err)
++++ }
++++
++++ select {
++++ case <-ooms:
++++ case <-time.After(100 * time.Millisecond):
++++ t.Fatal("no notification on oom channel after 100ms")
++++ }
++++
++++ // simulate what happens when a cgroup is destroyed by cleaning up and then
++++ // writing to the eventfd.
++++ helper.cleanup()
++++ if _, err := syscall.Write(efd, buf); err != nil {
++++ t.Fatal("unable to write to eventfd:", err)
++++ }
++++
++++ // give things a moment to shut down
++++ select {
++++ case _, ok := <-ooms:
++++ if ok {
++++ t.Fatal("expected no oom to be triggered")
++++ }
++++ case <-time.After(100 * time.Millisecond):
++++ }
++++
++++ if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF {
++++ t.Error("expected oom control to be closed")
++++ }
++++
++++ if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF {
++++ t.Error("expected event fd to be closed")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// PerfEventGroup joins the "perf_event" cgroup subsystem, which has no
// tunables or statistics of its own.
type PerfEventGroup struct {
}
++++
++++func (s *PerfEventGroup) Set(d *data) error {
++++ // we just want to join this group even though we don't set anything
++++ if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
++++ return err
++++ }
++++ return nil
++++}
++++
// Remove deletes the perf_event cgroup directory for d.
func (s *PerfEventGroup) Remove(d *data) error {
	return removePath(d.path("perf_event"))
}
++++
// GetStats is a no-op: the perf_event subsystem exposes no statistics.
func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "fmt"
++++ "log"
++++ "testing"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
++++func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
++++ if len(expected) != len(actual) {
++++ return fmt.Errorf("blkioStatEntries length do not match")
++++ }
++++ for i, expValue := range expected {
++++ actValue := actual[i]
++++ if expValue != actValue {
++++ return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue)
++++ }
++++ }
++++ return nil
++++}
++++
++++func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
++++ if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
++++ log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
++++ t.Fail()
++++ }
++++
++++ if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
++++ log.Printf("blkio IoServicedRecursive do not match - %s\n", err)
++++ t.Fail()
++++ }
++++
++++ if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
++++ log.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
++++ t.Fail()
++++ }
++++
++++ if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
++++ log.Printf("blkio SectorsRecursive do not match - %s\n", err)
++++ t.Fail()
++++ }
++++}
++++
++++func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
++++ if expected != actual {
++++ log.Printf("Expected throttling data %v but found %v\n", expected, actual)
++++ t.Fail()
++++ }
++++}
++++
++++func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
++++ if expected.Usage != actual.Usage {
++++ log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
++++ t.Fail()
++++ }
++++ if expected.MaxUsage != actual.MaxUsage {
++++ log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
++++ t.Fail()
++++ }
++++ for key, expValue := range expected.Stats {
++++ actValue, ok := actual.Stats[key]
++++ if !ok {
++++ log.Printf("Expected memory stat key %s not found\n", key)
++++ t.Fail()
++++ }
++++ if expValue != actValue {
++++ log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
++++ t.Fail()
++++ }
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++/*
++++Utility for testing cgroup operations.
++++
++++Creates a mock of the cgroup filesystem for the duration of the test.
++++*/
++++package fs
++++
++++import (
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "testing"
++++)
++++
// cgroupTestUtil mocks a single cgroup subsystem directory on disk so the
// fs-based cgroup code can be exercised without a real cgroup filesystem.
type cgroupTestUtil struct {
	// data to use in tests.
	CgroupData *data

	// Path to the mock cgroup directory.
	CgroupPath string

	// Temporary directory to store mock cgroup filesystem.
	tempDir string
	// t is used to fail the running test on setup/write errors.
	t *testing.T
}
++++
++++// Creates a new test util for the specified subsystem
++++func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
++++ d := &data{}
++++ tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem))
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ d.root = tempDir
++++ testCgroupPath, err := d.path(subsystem)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ // Ensure the full mock cgroup path exists.
++++ err = os.MkdirAll(testCgroupPath, 0755)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
++++}
++++
// cleanup removes the temporary mock cgroup filesystem.
func (c *cgroupTestUtil) cleanup() {
	os.RemoveAll(c.tempDir)
}
++++
++++// Write the specified contents on the mock of the specified cgroup files.
++++func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
++++ for file, contents := range fileContents {
++++ err := writeFile(c.CgroupPath, file, contents)
++++ if err != nil {
++++ c.t.Fatal(err)
++++ }
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "errors"
++++ "fmt"
++++ "io/ioutil"
++++ "path/filepath"
++++ "strconv"
++++ "strings"
++++)
++++
var (
	// ErrNotSupportStat signals a subsystem without stats support
	// (not referenced in this file; presumably used elsewhere in the package).
	ErrNotSupportStat = errors.New("stats are not supported for subsystem")
	// ErrNotValidFormat is returned when a cgroup file line is not in the
	// expected "key value" form.
	ErrNotValidFormat = errors.New("line is not a valid key value format")
)
++++
// parseUint parses s as an unsigned integer, saturating any negative value
// at zero. Due to kernel bugs, some of the memory cgroup stats can be
// negative, and callers want those reported as 0 rather than as errors.
func parseUint(s string, base, bitSize int) (uint64, error) {
	value, err := strconv.ParseUint(s, base, bitSize)
	if err == nil {
		return value, nil
	}

	// ParseUint failed: if the input is actually a negative number —
	// whether it fits in an int64 or underflows it (range error) —
	// saturate at zero instead of failing.
	intValue, intErr := strconv.ParseInt(s, base, bitSize)
	if intErr == nil && intValue < 0 {
		return 0, nil
	}
	if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
		return 0, nil
	}

	return value, err
}
++++
++++// Parses a cgroup param and returns as name, value
++++// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234
++++func getCgroupParamKeyValue(t string) (string, uint64, error) {
++++ parts := strings.Fields(t)
++++ switch len(parts) {
++++ case 2:
++++ value, err := parseUint(parts[1], 10, 64)
++++ if err != nil {
++++ return "", 0, fmt.Errorf("Unable to convert param value (%q) to uint64: %v", parts[1], err)
++++ }
++++
++++ return parts[0], value, nil
++++ default:
++++ return "", 0, ErrNotValidFormat
++++ }
++++}
++++
++++// Gets a single uint64 value from the specified cgroup file.
++++func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
++++ contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
++++ if err != nil {
++++ return 0, err
++++ }
++++
++++ return parseUint(strings.TrimSpace(string(contents)), 10, 64)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package fs
++++
++++import (
++++ "io/ioutil"
++++ "math"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++ "testing"
++++)
++++
const (
	cgroupFile  = "cgroup.file" // name of the mock cgroup file
	floatValue  = 2048.0        // expected parsed value for floatString
	floatString = "2048"        // file contents that should parse to floatValue
)
++++
++++func TestGetCgroupParamsInt(t *testing.T) {
++++ // Setup tempdir.
++++ tempDir, err := ioutil.TempDir("", "cgroup_utils_test")
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ defer os.RemoveAll(tempDir)
++++ tempFile := filepath.Join(tempDir, cgroupFile)
++++
++++ // Success.
++++ err = ioutil.WriteFile(tempFile, []byte(floatString), 0755)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ value, err := getCgroupParamUint(tempDir, cgroupFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ } else if value != floatValue {
++++ t.Fatalf("Expected %d to equal %f", value, floatValue)
++++ }
++++
++++ // Success with new line.
++++ err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ value, err = getCgroupParamUint(tempDir, cgroupFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ } else if value != floatValue {
++++ t.Fatalf("Expected %d to equal %f", value, floatValue)
++++ }
++++
++++ // Success with negative values
++++ err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ value, err = getCgroupParamUint(tempDir, cgroupFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ } else if value != 0 {
++++ t.Fatalf("Expected %d to equal %f", value, 0)
++++ }
++++
++++ // Success with negative values lesser than min int64
++++ s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64)
++++ err = ioutil.WriteFile(tempFile, []byte(s), 0755)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ value, err = getCgroupParamUint(tempDir, cgroupFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ } else if value != 0 {
++++ t.Fatalf("Expected %d to equal %f", value, 0)
++++ }
++++
++++ // Not a float.
++++ err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ _, err = getCgroupParamUint(tempDir, cgroupFile)
++++ if err == nil {
++++ t.Fatal("Expecting error, got none")
++++ }
++++
++++ // Unknown file.
++++ err = os.Remove(tempFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ _, err = getCgroupParamUint(tempDir, cgroupFile)
++++ if err == nil {
++++ t.Fatal("Expecting error, got none")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package cgroups
++++
// ThrottlingData reports CPU bandwidth throttling for a cgroup.
type ThrottlingData struct {
	// Number of periods with throttling active
	Periods uint64 `json:"periods,omitempty"`
	// Number of periods when the container hit its throttling limit.
	ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
	// Aggregate time the container was throttled for in nanoseconds.
	ThrottledTime uint64 `json:"throttled_time,omitempty"`
}

// All CPU stats are aggregate since container inception.
type CpuUsage struct {
	// Total CPU time consumed.
	// Units: nanoseconds.
	TotalUsage uint64 `json:"total_usage,omitempty"`
	// Total CPU time consumed per core.
	// Units: nanoseconds.
	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
	// Time spent by tasks of the cgroup in kernel mode.
	// Units: nanoseconds.
	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
	// Time spent by tasks of the cgroup in user mode.
	// Units: nanoseconds.
	UsageInUsermode uint64 `json:"usage_in_usermode"`
}

// CpuStats groups CPU usage and throttling information.
type CpuStats struct {
	CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
	// NOTE(review): "throlling_data" is a typo, but it is part of the JSON
	// wire format; fixing it would break existing consumers.
	ThrottlingData ThrottlingData `json:"throlling_data,omitempty"`
}

// MemoryStats reports memory cgroup usage and counters.
type MemoryStats struct {
	// current res_counter usage for memory
	Usage uint64 `json:"usage,omitempty"`
	// maximum usage ever recorded.
	MaxUsage uint64 `json:"max_usage,omitempty"`
	// TODO(vishh): Export these as stronger types.
	// all the stats exported via memory.stat.
	Stats map[string]uint64 `json:"stats,omitempty"`
	// number of times memory usage hits limits.
	Failcnt uint64 `json:"failcnt"`
}

// BlkioStatEntry is a single line of a recursive blkio stat file:
// device (major:minor), operation, and the counter value.
type BlkioStatEntry struct {
	Major uint64 `json:"major,omitempty"`
	Minor uint64 `json:"minor,omitempty"`
	Op    string `json:"op,omitempty"`
	Value uint64 `json:"value,omitempty"`
}

// BlkioStats reports block-I/O statistics for a cgroup.
type BlkioStats struct {
	// number of bytes transferred to and from the block device
	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
	// NOTE(review): the next two JSON tags ("io_serviced_recusrive",
	// "io_queue_recursive") are misspelled but are part of the wire format.
	IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recusrive,omitempty"`
	IoQueuedRecursive   []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
	SectorsRecursive    []BlkioStatEntry `json:"sectors_recursive,omitempty"`
}

// Stats aggregates all cgroup statistics for a container.
type Stats struct {
	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
}
++++
++++func NewStats() *Stats {
++++ memoryStats := MemoryStats{Stats: make(map[string]uint64)}
++++ return &Stats{MemoryStats: memoryStats}
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !linux
++++
++++package systemd
++++
++++import (
++++ "fmt"
++++
++++ "github.com/docker/libcontainer/cgroups"
++++)
++++
// UseSystemd always reports false: systemd cgroup management is linux-only.
func UseSystemd() bool {
	return false
}

// Apply is a stub on non-linux platforms.
func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
	return nil, fmt.Errorf("Systemd not supported")
}

// GetPids is a stub on non-linux platforms.
func GetPids(c *cgroups.Cgroup) ([]int, error) {
	return nil, fmt.Errorf("Systemd not supported")
}

// Freeze is a stub on non-linux platforms.
func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
	return fmt.Errorf("Systemd not supported")
}

// GetStats is a stub on non-linux platforms.
func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
	return nil, fmt.Errorf("Systemd not supported")
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package systemd
++++
++++import (
++++ "bytes"
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++ "strings"
++++ "sync"
++++ "time"
++++
++++ systemd "github.com/coreos/go-systemd/dbus"
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/cgroups/fs"
++++ "github.com/godbus/dbus"
++++)
++++
// systemdCgroup is the active-cgroup handle returned by Apply when cgroups
// are managed through systemd transient units.
type systemdCgroup struct {
	cgroup *cgroups.Cgroup
}

// subsystem is the minimal interface needed from the fs-based cgroup
// implementations to collect statistics.
type subsystem interface {
	GetStats(string, *cgroups.Stats) error
}
++++
var (
	// connLock guards the lazy initialization of theConn.
	connLock sync.Mutex
	// theConn is the process-wide systemd dbus connection, created on the
	// first UseSystemd call.
	theConn *systemd.Conn
	// hasStartTransientUnit records whether the running systemd supports
	// the StartTransientUnit dbus method.
	hasStartTransientUnit bool
	// subsystems maps cgroup subsystem names to the fs implementations
	// used for reading statistics.
	subsystems = map[string]subsystem{
		"devices":    &fs.DevicesGroup{},
		"memory":     &fs.MemoryGroup{},
		"cpu":        &fs.CpuGroup{},
		"cpuset":     &fs.CpusetGroup{},
		"cpuacct":    &fs.CpuacctGroup{},
		"blkio":      &fs.BlkioGroup{},
		"perf_event": &fs.PerfEventGroup{},
		"freezer":    &fs.FreezerGroup{},
	}
)
++++
// UseSystemd reports whether cgroups should be managed through systemd:
// the host must be booted with systemd (/run/systemd/system exists) and its
// dbus API must support StartTransientUnit. The dbus connection is created
// lazily on first call and cached for the life of the process.
func UseSystemd() bool {
	s, err := os.Stat("/run/systemd/system")
	if err != nil || !s.IsDir() {
		// Not booted with systemd.
		return false
	}

	connLock.Lock()
	defer connLock.Unlock()

	if theConn == nil {
		var err error
		theConn, err = systemd.New()
		if err != nil {
			return false
		}

		// Assume we have StartTransientUnit
		hasStartTransientUnit = true

		// But if we get UnknownMethod error we don't
		// NOTE(review): the "invalid" mode appears intended to make this
		// probe call fail without actually starting a unit — confirm.
		if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
					hasStartTransientUnit = false
				}
			}
		}
	}
	return hasStartTransientUnit
}
++++
// getIfaceForUnit maps a systemd unit name to the dbus property interface
// suffix for its type: "Scope", "Service", or the generic "Unit".
func getIfaceForUnit(unitName string) string {
	switch {
	case strings.HasSuffix(unitName, ".scope"):
		return "Scope"
	case strings.HasSuffix(unitName, ".service"):
		return "Service"
	default:
		return "Unit"
	}
}
++++
// Apply places pid into cgroups via a systemd transient scope unit, then
// manually joins the hierarchies (devices, memory swap, freezer, cpuset)
// that the systemd dbus API of this era cannot configure itself.
func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
	var (
		unitName   = getUnitName(c)
		slice      = "system.slice"
		properties []systemd.Property
		res        = &systemdCgroup{}
	)

	res.cgroup = c

	if c.Slice != "" {
		slice = c.Slice
	}

	properties = append(properties,
		systemd.Property{"Slice", dbus.MakeVariant(slice)},
		systemd.Property{"Description", dbus.MakeVariant("docker container " + c.Name)},
		systemd.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})},
	)

	// Always enable accounting, this gets us the same behaviour as the fs implementation,
	// plus the kernel has some problems with joining the memory cgroup at a later time.
	properties = append(properties,
		systemd.Property{"MemoryAccounting", dbus.MakeVariant(true)},
		systemd.Property{"CPUAccounting", dbus.MakeVariant(true)},
		systemd.Property{"BlockIOAccounting", dbus.MakeVariant(true)})

	if c.Memory != 0 {
		properties = append(properties,
			systemd.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))})
	}
	// TODO: MemoryReservation and MemorySwap not available in systemd

	if c.CpuShares != 0 {
		properties = append(properties,
			systemd.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))})
	}

	// "replace" mode: replace an existing unit of the same name, if any.
	if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil {
		return nil, err
	}

	if !c.AllowAllDevices {
		if err := joinDevices(c, pid); err != nil {
			return nil, err
		}
	}

	// -1 disables memorySwap
	if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) {
		if err := joinMemory(c, pid); err != nil {
			return nil, err
		}

	}

	// we need to manually join the freezer cgroup in systemd because it does not currently support it
	// via the dbus api
	if err := joinFreezer(c, pid); err != nil {
		return nil, err
	}

	if c.CpusetCpus != "" {
		if err := joinCpuset(c, pid); err != nil {
			return nil, err
		}
	}

	return res, nil
}
++++
++++func writeFile(dir, file, data string) error {
++++ return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
++++}
++++
++++func (c *systemdCgroup) Paths() (map[string]string, error) {
++++ paths := make(map[string]string)
++++
++++ for sysname := range subsystems {
++++ subsystemPath, err := getSubsystemPath(c.cgroup, sysname)
++++ if err != nil {
++++ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
++++ if cgroups.IsNotFound(err) {
++++ continue
++++ }
++++
++++ return nil, err
++++ }
++++
++++ paths[sysname] = subsystemPath
++++ }
++++
++++ return paths, nil
++++}
++++
++++func (c *systemdCgroup) Cleanup() error {
++++ // systemd cleans up, we don't need to do much
++++ paths, err := c.Paths()
++++ if err != nil {
++++ return err
++++ }
++++
++++ for _, path := range paths {
++++ os.RemoveAll(path)
++++ }
++++
++++ return nil
++++}
++++
++++func joinFreezer(c *cgroups.Cgroup, pid int) error {
++++ path, err := getSubsystemPath(c, "freezer")
++++ if err != nil {
++++ return err
++++ }
++++
++++ if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
++++ return err
++++ }
++++
++++ return ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700)
++++}
++++
++++func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) {
++++ mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
++++ if err != nil {
++++ return "", err
++++ }
++++
++++ initPath, err := cgroups.GetInitCgroupDir(subsystem)
++++ if err != nil {
++++ return "", err
++++ }
++++
++++ slice := "system.slice"
++++ if c.Slice != "" {
++++ slice = c.Slice
++++ }
++++
++++ return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
++++}
++++
++++func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
++++ path, err := getSubsystemPath(c, "freezer")
++++ if err != nil {
++++ return err
++++ }
++++
++++ if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil {
++++ return err
++++ }
++++ for {
++++ state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state"))
++++ if err != nil {
++++ return err
++++ }
++++ if string(state) == string(bytes.TrimSpace(state_)) {
++++ break
++++ }
++++ time.Sleep(1 * time.Millisecond)
++++ }
++++ return nil
++++}
++++
// GetPids returns the pids in c's cgroup, read from the "cpu" hierarchy's
// procs file.
func GetPids(c *cgroups.Cgroup) ([]int, error) {
	path, err := getSubsystemPath(c, "cpu")
	if err != nil {
		return nil, err
	}

	return cgroups.ReadProcsFile(path)
}
++++
// getUnitName builds the systemd scope unit name "<parent>-<name>.scope".
func getUnitName(c *cgroups.Cgroup) string {
	return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name)
}
++++
++++/*
++++ * This would be nicer to get from the systemd API when accounting
++++ * is enabled, but sadly there is no way to do that yet.
++++ * The lack of this functionality in the API & the approach taken
++++ * is guided by
++++ * http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation.
++++ */
++++func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
++++ stats := cgroups.NewStats()
++++
++++ for sysname, sys := range subsystems {
++++ subsystemPath, err := getSubsystemPath(c, sysname)
++++ if err != nil {
++++ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
++++ if cgroups.IsNotFound(err) {
++++ continue
++++ }
++++
++++ return nil, err
++++ }
++++
++++ if err := sys.GetStats(subsystemPath, stats); err != nil {
++++ return nil, err
++++ }
++++ }
++++
++++ return stats, nil
++++}
++++
// Atm we can't use the systemd device support because of two missing things:
// * Support for wildcards to allow mknod on any device
// * Support for wildcards to allow /dev/pts support
//
// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
// in wide use. When both these are available we will be able to switch, but need to keep the old
// implementation for backwards compat.
//
// Note: we can't use systemd to set up the initial limits, and then change the cgroup
// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
// This happens at least for v208 when any sibling unit is started.
func joinDevices(c *cgroups.Cgroup, pid int) error {
	path, err := getSubsystemPath(c, "devices")
	if err != nil {
		return err
	}

	// Create the cgroup directory if needed; an already-existing one is fine.
	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
		return err
	}

	// Move the process into the cgroup before tightening device access.
	if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
		return err
	}

	// Deny all devices ("a"), then whitelist each allowed device explicitly.
	if err := writeFile(path, "devices.deny", "a"); err != nil {
		return err
	}

	for _, dev := range c.AllowedDevices {
		if err := writeFile(path, "devices.allow", dev.GetCgroupAllowString()); err != nil {
			return err
		}
	}

	return nil
}
++++
++++func joinMemory(c *cgroups.Cgroup, pid int) error {
++++ memorySwap := c.MemorySwap
++++
++++ if memorySwap == 0 {
++++ // By default, MemorySwap is set to twice the size of RAM.
++++ memorySwap = c.Memory * 2
++++ }
++++
++++ path, err := getSubsystemPath(c, "memory")
++++ if err != nil {
++++ return err
++++ }
++++
++++ return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700)
++++}
++++
++++// systemd does not atm set up the cpuset controller, so we must manually
++++// join it. Additionally that is a very finicky controller where each
++++// level must have a full setup as the default for a new directory is "no cpus"
++++func joinCpuset(c *cgroups.Cgroup, pid int) error {
++++ path, err := getSubsystemPath(c, "cpuset")
++++ if err != nil {
++++ return err
++++ }
++++
++++ s := &fs.CpusetGroup{}
++++
++++ return s.SetDir(path, c.CpusetCpus, pid)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package cgroups
++++
++++import (
++++ "bufio"
++++ "fmt"
++++ "io"
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++ "strings"
++++
++++ "github.com/docker/docker/pkg/mount"
++++)
++++
++++// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
++++func FindCgroupMountpoint(subsystem string) (string, error) {
++++ mounts, err := mount.GetMounts()
++++ if err != nil {
++++ return "", err
++++ }
++++
++++ for _, mount := range mounts {
++++ if mount.Fstype == "cgroup" {
++++ for _, opt := range strings.Split(mount.VfsOpts, ",") {
++++ if opt == subsystem {
++++ return mount.Mountpoint, nil
++++ }
++++ }
++++ }
++++ }
++++
++++ return "", NewNotFoundError(subsystem)
++++}
++++
// Mount describes a single cgroup hierarchy mount: where it is mounted and
// which subsystems (or "name=" hierarchies) it carries.
type Mount struct {
	Mountpoint string
	Subsystems []string
}
++++
++++func (m Mount) GetThisCgroupDir() (string, error) {
++++ if len(m.Subsystems) == 0 {
++++ return "", fmt.Errorf("no subsystem for mount")
++++ }
++++
++++ return GetThisCgroupDir(m.Subsystems[0])
++++}
++++
++++func GetCgroupMounts() ([]Mount, error) {
++++ mounts, err := mount.GetMounts()
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ all, err := GetAllSubsystems()
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ allMap := make(map[string]bool)
++++ for _, s := range all {
++++ allMap[s] = true
++++ }
++++
++++ res := []Mount{}
++++ for _, mount := range mounts {
++++ if mount.Fstype == "cgroup" {
++++ m := Mount{Mountpoint: mount.Mountpoint}
++++
++++ for _, opt := range strings.Split(mount.VfsOpts, ",") {
++++ if strings.HasPrefix(opt, "name=") {
++++ m.Subsystems = append(m.Subsystems, opt)
++++ }
++++ if allMap[opt] {
++++ m.Subsystems = append(m.Subsystems, opt)
++++ }
++++ }
++++ res = append(res, m)
++++ }
++++ }
++++ return res, nil
++++}
++++
// Returns all the cgroup subsystems supported by the kernel, as listed in
// /proc/cgroups. A subsystem is included only when its "enabled" column is
// non-zero.
func GetAllSubsystems() ([]string, error) {
	f, err := os.Open("/proc/cgroups")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	subsystems := []string{}

	s := bufio.NewScanner(f)
	for s.Scan() {
		text := s.Text()
		// Skip the '#'-prefixed header and any blank lines; indexing text[0]
		// on an empty line would panic.
		if text == "" || text[0] == '#' {
			continue
		}
		// Columns: subsys_name  hierarchy  num_cgroups  enabled
		parts := strings.Fields(text)
		if len(parts) >= 4 && parts[3] != "0" {
			subsystems = append(subsystems, parts[0])
		}
	}
	// Scanner errors are only reported once Scan has returned false.
	if err := s.Err(); err != nil {
		return nil, err
	}
	return subsystems, nil
}
++++
++++// Returns the relative path to the cgroup docker is running in.
++++func GetThisCgroupDir(subsystem string) (string, error) {
++++ f, err := os.Open("/proc/self/cgroup")
++++ if err != nil {
++++ return "", err
++++ }
++++ defer f.Close()
++++
++++ return ParseCgroupFile(subsystem, f)
++++}
++++
++++func GetInitCgroupDir(subsystem string) (string, error) {
++++ f, err := os.Open("/proc/1/cgroup")
++++ if err != nil {
++++ return "", err
++++ }
++++ defer f.Close()
++++
++++ return ParseCgroupFile(subsystem, f)
++++}
++++
++++func ReadProcsFile(dir string) ([]int, error) {
++++ f, err := os.Open(filepath.Join(dir, "cgroup.procs"))
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++
++++ var (
++++ s = bufio.NewScanner(f)
++++ out = []int{}
++++ )
++++
++++ for s.Scan() {
++++ if t := s.Text(); t != "" {
++++ pid, err := strconv.Atoi(t)
++++ if err != nil {
++++ return nil, err
++++ }
++++ out = append(out, pid)
++++ }
++++ }
++++ return out, nil
++++}
++++
++++func ParseCgroupFile(subsystem string, r io.Reader) (string, error) {
++++ s := bufio.NewScanner(r)
++++
++++ for s.Scan() {
++++ if err := s.Err(); err != nil {
++++ return "", err
++++ }
++++
++++ text := s.Text()
++++ parts := strings.Split(text, ":")
++++
++++ for _, subs := range strings.Split(parts[1], ",") {
++++ if subs == subsystem {
++++ return parts[2], nil
++++ }
++++ }
++++ }
++++
++++ return "", NewNotFoundError(subsystem)
++++}
++++
++++func pathExists(path string) bool {
++++ if _, err := os.Stat(path); err != nil {
++++ return false
++++ }
++++ return true
++++}
++++
++++func EnterPid(cgroupPaths map[string]string, pid int) error {
++++ for _, path := range cgroupPaths {
++++ if pathExists(path) {
++++ if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"),
++++ []byte(strconv.Itoa(pid)), 0700); err != nil {
++++ return err
++++ }
++++ }
++++ }
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
++++import (
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/mount"
++++ "github.com/docker/libcontainer/network"
++++)
++++
++++type MountConfig mount.MountConfig
++++
++++type Network network.Network
++++
// Config defines configuration options for executing a process inside a contained environment.
type Config struct {
	// Mount specific options.
	MountConfig *MountConfig `json:"mount_config,omitempty"`

	// Pathname to container's root filesystem
	RootFs string `json:"root_fs,omitempty"`

	// Hostname optionally sets the container's hostname if provided
	Hostname string `json:"hostname,omitempty"`

	// User will set the uid and gid of the executing process running inside the container
	User string `json:"user,omitempty"`

	// WorkingDir will change the processes current working directory inside the container's rootfs
	WorkingDir string `json:"working_dir,omitempty"`

	// Env will populate the processes environment with the provided values
	// Any values from the parent processes will be cleared before the values
	// provided in Env are provided to the process
	Env []string `json:"environment,omitempty"`

	// Tty when true will allocate a pty slave on the host for access by the container's process
	// and ensure that it is mounted inside the container's rootfs
	Tty bool `json:"tty,omitempty"`

	// Namespaces specifies the container's namespaces that it should setup when cloning the init process
	// If a namespace is not provided that namespace is shared from the container's parent process
	Namespaces map[string]bool `json:"namespaces,omitempty"`

	// Capabilities specify the capabilities to keep when executing the process inside the container
	// All capabilities not specified will be dropped from the processes capability mask
	Capabilities []string `json:"capabilities,omitempty"`

	// Networks specifies the container's network setup to be created
	Networks []*Network `json:"networks,omitempty"`

	// Routes can be specified to create entries in the route table as the container is started
	Routes []*Route `json:"routes,omitempty"`

	// Cgroups specifies specific cgroup settings for the various subsystems that the container is
	// placed into to limit the resources the container has available
	Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"`

	// AppArmorProfile specifies the profile to apply to the process running in the container and is
	// changed at the time the process is execed
	AppArmorProfile string `json:"apparmor_profile,omitempty"`

	// ProcessLabel specifies the label to apply to the process running in the container. It is
	// commonly used by selinux
	ProcessLabel string `json:"process_label,omitempty"`

	// RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and
	// /proc/bus
	RestrictSys bool `json:"restrict_sys,omitempty"`
}
++++
// Routes can be specified to create entries in the route table as the container is started
//
// All of destination, source, and gateway should be either IPv4 or IPv6.
// One of the three options must be present, and omitted entries will use their
// IP family default for the route table. For IPv4 for example, setting the
// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
// destination of 0.0.0.0 (or *) when viewed in the route table.
type Route struct {
	// Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
	Destination string `json:"destination,omitempty"`

	// Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
	Source string `json:"source,omitempty"`

	// Sets the gateway. Accepts IPv4 and IPv6
	Gateway string `json:"gateway,omitempty"`

	// The device to set this route up for, for example: eth0
	InterfaceName string `json:"interface_name,omitempty"`
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
++++import (
++++ "encoding/json"
++++ "os"
++++ "path/filepath"
++++ "testing"
++++
++++ "github.com/docker/libcontainer/devices"
++++)
++++
// contains reports whether expected occurs in values.
func contains(expected string, values []string) bool {
	found := false
	for _, candidate := range values {
		if candidate == expected {
			found = true
			break
		}
	}
	return found
}
++++
++++func containsDevice(expected *devices.Device, values []*devices.Device) bool {
++++ for _, d := range values {
++++ if d.Path == expected.Path &&
++++ d.CgroupPermissions == expected.CgroupPermissions &&
++++ d.FileMode == expected.FileMode &&
++++ d.MajorNumber == expected.MajorNumber &&
++++ d.MinorNumber == expected.MinorNumber &&
++++ d.Type == expected.Type {
++++ return true
++++ }
++++ }
++++ return false
++++}
++++
++++func loadConfig(name string) (*Config, error) {
++++ f, err := os.Open(filepath.Join("sample_configs", name))
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++
++++ var container *Config
++++ if err := json.NewDecoder(f).Decode(&container); err != nil {
++++ return nil, err
++++ }
++++
++++ return container, nil
++++}
++++
++++func TestConfigJsonFormat(t *testing.T) {
++++ container, err := loadConfig("attach_to_bridge.json")
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if container.Hostname != "koye" {
++++ t.Log("hostname is not set")
++++ t.Fail()
++++ }
++++
++++ if !container.Tty {
++++ t.Log("tty should be set to true")
++++ t.Fail()
++++ }
++++
++++ if !container.Namespaces["NEWNET"] {
++++ t.Log("namespaces should contain NEWNET")
++++ t.Fail()
++++ }
++++
++++ if container.Namespaces["NEWUSER"] {
++++ t.Log("namespaces should not contain NEWUSER")
++++ t.Fail()
++++ }
++++
++++ if contains("SYS_ADMIN", container.Capabilities) {
++++ t.Log("SYS_ADMIN should not be enabled in capabilities mask")
++++ t.Fail()
++++ }
++++
++++ if !contains("MKNOD", container.Capabilities) {
++++ t.Log("MKNOD should be enabled in capabilities mask")
++++ t.Fail()
++++ }
++++
++++ if !contains("SYS_CHROOT", container.Capabilities) {
++++ t.Log("capabilities mask should contain SYS_CHROOT")
++++ t.Fail()
++++ }
++++
++++ for _, n := range container.Networks {
++++ if n.Type == "veth" {
++++ if n.Bridge != "docker0" {
++++ t.Logf("veth bridge should be docker0 but received %q", n.Bridge)
++++ t.Fail()
++++ }
++++
++++ if n.Address != "172.17.0.101/16" {
++++ t.Logf("veth address should be 172.17.0.101/61 but received %q", n.Address)
++++ t.Fail()
++++ }
++++
++++ if n.VethPrefix != "veth" {
++++ t.Logf("veth prefix should be veth but received %q", n.VethPrefix)
++++ t.Fail()
++++ }
++++
++++ if n.Gateway != "172.17.42.1" {
++++ t.Logf("veth gateway should be 172.17.42.1 but received %q", n.Gateway)
++++ t.Fail()
++++ }
++++
++++ if n.Mtu != 1500 {
++++ t.Logf("veth mtu should be 1500 but received %d", n.Mtu)
++++ t.Fail()
++++ }
++++
++++ break
++++ }
++++ }
++++
++++ for _, d := range devices.DefaultSimpleDevices {
++++ if !containsDevice(d, container.MountConfig.DeviceNodes) {
++++ t.Logf("expected device configuration for %s", d.Path)
++++ t.Fail()
++++ }
++++ }
++++
++++ if !container.RestrictSys {
++++ t.Log("expected restrict sys to be true")
++++ t.Fail()
++++ }
++++}
++++
++++func TestApparmorProfile(t *testing.T) {
++++ container, err := loadConfig("apparmor.json")
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if container.AppArmorProfile != "docker-default" {
++++ t.Fatalf("expected apparmor profile to be docker-default but received %q", container.AppArmorProfile)
++++ }
++++}
++++
++++func TestSelinuxLabels(t *testing.T) {
++++ container, err := loadConfig("selinux.json")
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ label := "system_u:system_r:svirt_lxc_net_t:s0:c164,c475"
++++
++++ if container.ProcessLabel != label {
++++ t.Fatalf("expected process label %q but received %q", label, container.ProcessLabel)
++++ }
++++ if container.MountConfig.MountLabel != label {
++++ t.Fatalf("expected mount label %q but received %q", label, container.MountConfig.MountLabel)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package console
++++
++++import (
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++ "unsafe"
++++
++++ "github.com/docker/libcontainer/label"
++++)
++++
// Setup initializes the proper /dev/console inside the rootfs path
func Setup(rootfs, consolePath, mountLabel string) error {
	// Clear the umask for the duration so the modes below apply exactly.
	oldMask := syscall.Umask(0000)
	defer syscall.Umask(oldMask)

	// Restrict the pty slave to root read/write only before exposing it.
	if err := os.Chmod(consolePath, 0600); err != nil {
		return err
	}

	if err := os.Chown(consolePath, 0, 0); err != nil {
		return err
	}

	// Apply the SELinux mount label so the console matches the container.
	if err := label.SetFileLabel(consolePath, mountLabel); err != nil {
		return fmt.Errorf("set file label %s %s", consolePath, err)
	}

	dest := filepath.Join(rootfs, "dev/console")

	// Ensure the bind-mount target exists; an already-existing file is fine.
	f, err := os.Create(dest)
	if err != nil && !os.IsExist(err) {
		return fmt.Errorf("create %s %s", dest, err)
	}

	if f != nil {
		f.Close()
	}

	// Bind-mount the host pty slave onto the container's /dev/console.
	if err := syscall.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind %s to %s %s", consolePath, dest, err)
	}

	return nil
}
++++
++++func OpenAndDup(consolePath string) error {
++++ slave, err := OpenTerminal(consolePath, syscall.O_RDWR)
++++ if err != nil {
++++ return fmt.Errorf("open terminal %s", err)
++++ }
++++
++++ if err := syscall.Dup2(int(slave.Fd()), 0); err != nil {
++++ return err
++++ }
++++
++++ if err := syscall.Dup2(int(slave.Fd()), 1); err != nil {
++++ return err
++++ }
++++
++++ return syscall.Dup2(int(slave.Fd()), 2)
++++}
++++
++++// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
++++// Unlockpt should be called before opening the slave side of a pseudoterminal.
++++func Unlockpt(f *os.File) error {
++++ var u int
++++
++++ return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
++++}
++++
++++// Ptsname retrieves the name of the first available pts for the given master.
++++func Ptsname(f *os.File) (string, error) {
++++ var n int
++++
++++ if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
++++ return "", err
++++ }
++++
++++ return fmt.Sprintf("/dev/pts/%d", n), nil
++++}
++++
++++// CreateMasterAndConsole will open /dev/ptmx on the host and retreive the
++++// pts name for use as the pty slave inside the container
++++func CreateMasterAndConsole() (*os.File, string, error) {
++++ master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
++++ if err != nil {
++++ return nil, "", err
++++ }
++++
++++ console, err := Ptsname(master)
++++ if err != nil {
++++ return nil, "", err
++++ }
++++
++++ if err := Unlockpt(master); err != nil {
++++ return nil, "", err
++++ }
++++
++++ return master, console, nil
++++}
++++
// OpenPtmx opens /dev/ptmx, i.e. the PTY master.
func OpenPtmx() (*os.File, error) {
	// O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all.
	// NOTE(review): the master is opened O_RDONLY here, not O_RDWR — confirm
	// callers never need to write through this handle.
	return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
}
++++
// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC
// used to open the pty slave inside the container namespace
func OpenTerminal(name string, flag int) (*os.File, error) {
	fd, err := syscall.Open(name, flag, 0)
	if err != nil {
		return nil, &os.PathError{Op: "open", Path: name, Err: err}
	}
	return os.NewFile(uintptr(fd), name), nil
}
++++
// Ioctl issues the given ioctl request against fd, returning the errno as an
// error when the syscall fails.
func Ioctl(fd uintptr, flag, data uintptr) error {
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data)
	if errno != 0 {
		return errno
	}
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++/*
++++NOTE: The API is in flux and mainly not implemented. Proceed with caution until further notice.
++++*/
++++package libcontainer
++++
// A libcontainer container object.
//
// Each container is thread-safe within the same process. Since a container can
// be destroyed by a separate process, any function may return that the container
// was not found.
type Container interface {
	// Returns the ID of the container
	ID() string

	// Returns the current run state of the container.
	//
	// Errors:
	// ContainerDestroyed - Container no longer exists,
	// SystemError - System error.
	RunState() (*RunState, Error)

	// Returns the current config of the container.
	Config() *Config

	// Start a process inside the container. Returns the PID of the new process (in the caller process's namespace) and a channel that will return the exit status of the process whenever it dies.
	//
	// Errors:
	// ContainerDestroyed - Container no longer exists,
	// ConfigInvalid - config is invalid,
	// ContainerPaused - Container is paused,
	// SystemError - System error.
	Start(config *ProcessConfig) (pid int, exitChan chan int, err Error)

	// Destroys the container after killing all running processes.
	//
	// Any event registrations are removed before the container is destroyed.
	// No error is returned if the container is already destroyed.
	//
	// Errors:
	// SystemError - System error.
	Destroy() Error

	// Returns the PIDs inside this container. The PIDs are in the namespace of the calling process.
	//
	// Errors:
	// ContainerDestroyed - Container no longer exists,
	// SystemError - System error.
	//
	// Some of the returned PIDs may no longer refer to processes in the Container, unless
	// the Container state is PAUSED in which case every PID in the slice is valid.
	Processes() ([]int, Error)

	// Returns statistics for the container.
	//
	// Errors:
	// ContainerDestroyed - Container no longer exists,
	// SystemError - System error.
	Stats() (*ContainerStats, Error)

	// If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses
	// the execution of any user processes. Asynchronously, when the container has finished being paused the
	// state is changed to PAUSED.
	// If the Container state is PAUSED, do nothing.
	//
	// Errors:
	// ContainerDestroyed - Container no longer exists,
	// SystemError - System error.
	Pause() Error

	// If the Container state is PAUSED, resumes the execution of any user processes in the
	// Container before setting the Container state to RUNNING.
	// If the Container state is RUNNING, do nothing.
	//
	// Errors:
	// ContainerDestroyed - Container no longer exists,
	// SystemError - System error.
	Resume() Error
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package devices
++++
var (
	// These are devices that are to be both allowed and created.

	// DefaultSimpleDevices lists the basic character devices every container
	// gets: null, zero, full, tty, and the random sources.
	DefaultSimpleDevices = []*Device{
		// /dev/null and zero
		{
			Path: "/dev/null",
			Type: 'c',
			MajorNumber: 1,
			MinorNumber: 3,
			CgroupPermissions: "rwm",
			FileMode: 0666,
		},
		{
			Path: "/dev/zero",
			Type: 'c',
			MajorNumber: 1,
			MinorNumber: 5,
			CgroupPermissions: "rwm",
			FileMode: 0666,
		},

		{
			Path: "/dev/full",
			Type: 'c',
			MajorNumber: 1,
			MinorNumber: 7,
			CgroupPermissions: "rwm",
			FileMode: 0666,
		},

		// consoles and ttys
		{
			Path: "/dev/tty",
			Type: 'c',
			MajorNumber: 5,
			MinorNumber: 0,
			CgroupPermissions: "rwm",
			FileMode: 0666,
		},

		// /dev/urandom,/dev/random
		{
			Path: "/dev/urandom",
			Type: 'c',
			MajorNumber: 1,
			MinorNumber: 9,
			CgroupPermissions: "rwm",
			FileMode: 0666,
		},
		{
			Path: "/dev/random",
			Type: 'c',
			MajorNumber: 1,
			MinorNumber: 8,
			CgroupPermissions: "rwm",
			FileMode: 0666,
		},
	}

	// DefaultAllowedDevices extends the simple set with entries that are
	// allowed in the devices cgroup but not necessarily created in /dev
	// (wildcard mknod rules, console/tty nodes, pts, tuntap).
	DefaultAllowedDevices = append([]*Device{
		// allow mknod for any device
		{
			Type: 'c',
			MajorNumber: Wildcard,
			MinorNumber: Wildcard,
			CgroupPermissions: "m",
		},
		{
			Type: 'b',
			MajorNumber: Wildcard,
			MinorNumber: Wildcard,
			CgroupPermissions: "m",
		},

		{
			Path: "/dev/console",
			Type: 'c',
			MajorNumber: 5,
			MinorNumber: 1,
			CgroupPermissions: "rwm",
		},
		{
			Path: "/dev/tty0",
			Type: 'c',
			MajorNumber: 4,
			MinorNumber: 0,
			CgroupPermissions: "rwm",
		},
		{
			Path: "/dev/tty1",
			Type: 'c',
			MajorNumber: 4,
			MinorNumber: 1,
			CgroupPermissions: "rwm",
		},
		// /dev/pts/ - pts namespaces are "coming soon"
		{
			Path: "",
			Type: 'c',
			MajorNumber: 136,
			MinorNumber: Wildcard,
			CgroupPermissions: "rwm",
		},
		{
			Path: "",
			Type: 'c',
			MajorNumber: 5,
			MinorNumber: 2,
			CgroupPermissions: "rwm",
		},

		// tuntap
		{
			Path: "",
			Type: 'c',
			MajorNumber: 10,
			MinorNumber: 200,
			CgroupPermissions: "rwm",
		},

		/*// fuse
		{
			Path: "",
			Type: 'c',
			MajorNumber: 10,
			MinorNumber: 229,
			CgroupPermissions: "rwm",
		},

		// rtc
		{
			Path: "",
			Type: 'c',
			MajorNumber: 254,
			MinorNumber: 0,
			CgroupPermissions: "rwm",
		},
		*/
	}, DefaultSimpleDevices...)

	// DefaultAutoCreatedDevices lists devices that are created in the
	// container's /dev but not whitelisted in the devices cgroup.
	DefaultAutoCreatedDevices = append([]*Device{
		{
			// /dev/fuse is created but not allowed.
			// This is to allow Java to work, because Java
			// insists on there being a /dev/fuse.
			// https://github.com/docker/docker/issues/514
			// https://github.com/docker/docker/issues/2393
			//
			Path: "/dev/fuse",
			Type: 'c',
			MajorNumber: 10,
			MinorNumber: 229,
			CgroupPermissions: "rwm",
		},
	}, DefaultSimpleDevices...)
)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package devices
++++
++++import (
++++ "errors"
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++)
++++
const (
	// Wildcard matches any major or minor device number in cgroup rules.
	Wildcard = -1
)

var (
	// ErrNotADeviceNode is returned by GetDevice for paths that exist but
	// are not character or block device nodes.
	ErrNotADeviceNode = errors.New("not a device node")
)

// Testing dependencies
var (
	osLstat = os.Lstat
	ioutilReadDir = ioutil.ReadDir
)

// Device describes a Linux device node together with the cgroup permissions
// used when whitelisting it for a container.
type Device struct {
	Type rune `json:"type,omitempty"`
	Path string `json:"path,omitempty"` // It is fine if this is an empty string in the case that you are using Wildcards
	MajorNumber int64 `json:"major_number,omitempty"` // Use the wildcard constant for wildcards.
	MinorNumber int64 `json:"minor_number,omitempty"` // Use the wildcard constant for wildcards.
	CgroupPermissions string `json:"cgroup_permissions,omitempty"` // Typically just "rwm"
	FileMode os.FileMode `json:"file_mode,omitempty"` // The permission bits of the file's mode
	Uid uint32 `json:"uid,omitempty"`
	Gid uint32 `json:"gid,omitempty"`
}
++++
++++func GetDeviceNumberString(deviceNumber int64) string {
++++ if deviceNumber == Wildcard {
++++ return "*"
++++ } else {
++++ return fmt.Sprintf("%d", deviceNumber)
++++ }
++++}
++++
// GetCgroupAllowString formats the device as a cgroup devices.allow rule,
// e.g. "c 1:3 rwm", with "*" substituted for wildcard major/minor numbers.
func (device *Device) GetCgroupAllowString() string {
	return fmt.Sprintf("%c %s:%s %s", device.Type, GetDeviceNumberString(device.MajorNumber), GetDeviceNumberString(device.MinorNumber), device.CgroupPermissions)
}
++++
// Given the path to a device and its cgroup_permissions (which cannot be easily queried) look up the information about a linux device and return that information as a Device struct.
func GetDevice(path, cgroupPermissions string) (*Device, error) {
	fileInfo, err := osLstat(path)
	if err != nil {
		return nil, err
	}

	var (
		devType rune
		mode = fileInfo.Mode()
		fileModePermissionBits = os.FileMode.Perm(mode)
	)

	// Classify the node: not a device at all, then char device, else block.
	switch {
	case mode&os.ModeDevice == 0:
		return nil, ErrNotADeviceNode
	case mode&os.ModeCharDevice != 0:
		// Fold the S_IFCHR type bit into the recorded file mode.
		fileModePermissionBits |= syscall.S_IFCHR
		devType = 'c'
	default:
		fileModePermissionBits |= syscall.S_IFBLK
		devType = 'b'
	}

	stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
	if !ok {
		return nil, fmt.Errorf("cannot determine the device number for device %s", path)
	}
	// NOTE(review): Rdev is narrowed to int before Major/Minor decode it —
	// fine for the classic 8/8-bit encoding; confirm for exotic dev_t values.
	devNumber := int(stat_t.Rdev)

	return &Device{
		Type: devType,
		Path: path,
		MajorNumber: Major(devNumber),
		MinorNumber: Minor(devNumber),
		CgroupPermissions: cgroupPermissions,
		FileMode: fileModePermissionBits,
		Uid: stat_t.Uid,
		Gid: stat_t.Gid,
	}, nil
}
++++
// GetHostDeviceNodes returns a Device for every device node found under
// /dev, each with "rwm" cgroup permissions (see getDeviceNodes).
func GetHostDeviceNodes() ([]*Device, error) {
	return getDeviceNodes("/dev")
}
++++
++++func getDeviceNodes(path string) ([]*Device, error) {
++++ files, err := ioutilReadDir(path)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ out := []*Device{}
++++ for _, f := range files {
++++ if f.IsDir() {
++++ switch f.Name() {
++++ case "pts", "shm", "fd":
++++ continue
++++ default:
++++ sub, err := getDeviceNodes(filepath.Join(path, f.Name()))
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ out = append(out, sub...)
++++ continue
++++ }
++++ }
++++
++++ device, err := GetDevice(filepath.Join(path, f.Name()), "rwm")
++++ if err != nil {
++++ if err == ErrNotADeviceNode {
++++ continue
++++ }
++++ return nil, err
++++ }
++++ out = append(out, device)
++++ }
++++
++++ return out, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package devices
++++
++++import (
++++ "errors"
++++ "os"
++++ "testing"
++++)
++++
++++func TestGetDeviceLstatFailure(t *testing.T) {
++++ testError := errors.New("test error")
++++
++++ // Override os.Lstat to inject error.
++++ osLstat = func(path string) (os.FileInfo, error) {
++++ return nil, testError
++++ }
++++
++++ _, err := GetDevice("", "")
++++ if err != testError {
++++ t.Fatalf("Unexpected error %v, expected %v", err, testError)
++++ }
++++}
++++
++++func TestGetHostDeviceNodesIoutilReadDirFailure(t *testing.T) {
++++ testError := errors.New("test error")
++++
++++ // Override ioutil.ReadDir to inject error.
++++ ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
++++ return nil, testError
++++ }
++++
++++ _, err := GetHostDeviceNodes()
++++ if err != testError {
++++ t.Fatalf("Unexpected error %v, expected %v", err, testError)
++++ }
++++}
++++
++++func TestGetHostDeviceNodesIoutilReadDirDeepFailure(t *testing.T) {
++++ testError := errors.New("test error")
++++ called := false
++++
++++ // Override ioutil.ReadDir to inject error after the first call.
++++ ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
++++ if called {
++++ return nil, testError
++++ }
++++ called = true
++++
++++ // Provoke a second call.
++++ fi, err := os.Lstat("/tmp")
++++ if err != nil {
++++ t.Fatalf("Unexpected error %v", err)
++++ }
++++
++++ return []os.FileInfo{fi}, nil
++++ }
++++
++++ _, err := GetHostDeviceNodes()
++++ if err != testError {
++++ t.Fatalf("Unexpected error %v, expected %v", err, testError)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package devices
++++
++++/*
++++
++++This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
++++
++++You can read what they are here:
++++
++++ - http://www.makelinux.net/ldd3/chp-3-sect-2
++++ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
++++
++++Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h> as the representation of device numbers used by go is different than the one used internally to the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
++++
++++*/
++++
// Major extracts the major number from a Go/libc-encoded device number
// (bits 8..19).
func Major(devNumber int) int64 {
	major := (devNumber >> 8) & 0xfff
	return int64(major)
}
++++
// Minor extracts the minor number from a Go/libc-encoded device number:
// the low 8 bits plus the high minor bits stored above bit 12.
func Minor(devNumber int) int64 {
	low := devNumber & 0xff
	high := (devNumber >> 12) & 0xfff00
	return int64(low | high)
}
++++
// Mkdev combines a major and minor number into a Linux dev_t value;
// it is the inverse of Major and Minor (the glibc makedev() encoding).
func Mkdev(majorNumber int64, minorNumber int64) int {
	dev := (majorNumber << 8) | (minorNumber & 0xff) | ((minorNumber & 0xfff00) << 12)
	return int(dev)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
// ErrorCode is the API error code type carried by the Error interface;
// it identifies the category of failure.
type ErrorCode int

// API error codes.
const (
	// Factory errors
	IdInUse         ErrorCode = iota // id already used by an existing container (see Factory.Create)
	InvalidIdFormat                  // id does not match the required format (see Factory.Create)
	// TODO: add Load errors

	// Container errors
	ContainerDestroyed
	ContainerPaused

	// Common errors
	ConfigInvalid // the supplied Config failed validation
	SystemError   // unexpected system-level failure
)
++++
// Error is the API error type returned by libcontainer operations. It
// extends the built-in error with a stack trace, a printable detail
// string, and a machine-readable ErrorCode.
type Error interface {
	error

	// Returns the stack trace, if any, which identifies the
	// point at which the error occurred.
	Stack() []byte

	// Returns a verbose string including the error message
	// and a representation of the stack trace suitable for
	// printing.
	Detail() string

	// Returns the error code for this error.
	Code() ErrorCode
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
// Factory creates and reconstitutes containers.
type Factory interface {

	// Creates a new container with the given id and starts the initial process inside it.
	// id must be a string containing only letters, digits and underscores and must contain
	// between 1 and 1024 characters, inclusive.
	//
	// The id must not already be in use by an existing container. Containers created using
	// a factory with the same path (and file system) must have distinct ids.
	//
	// Returns the new container with a running process.
	//
	// Errors:
	// IdInUse - id is already in use by a container
	// InvalidIdFormat - id has incorrect format
	// ConfigInvalid - config is invalid
	// SystemError - System error
	//
	// On error, any partially created container parts are cleaned up (the operation is atomic).
	Create(id string, config *Config) (Container, Error)

	// Load takes an ID for an existing container and reconstructs the container
	// from the state.
	//
	// Errors (not yet expressed as ErrorCode values):
	// Path does not exist
	// Container is stopped
	// System error
	// TODO: fix description
	Load(id string) (Container, Error)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !selinux !linux
++++
++++package label
++++
// InitLabels returns the process label and file labels to be used within
// the container. A list of options can be passed into this function to alter
// the labels. In builds without SELinux support this is a no-op that
// always returns empty labels.
func InitLabels(options []string) (string, string, error) {
	return "", "", nil
}
++++
// GenLabels is the no-op stub of the SELinux build's GenLabels; it
// always returns empty labels.
func GenLabels(options string) (string, string, error) {
	return "", "", nil
}
++++
// FormatMountLabel returns src unchanged: without SELinux support there
// is no context option to append to the mount data.
func FormatMountLabel(src string, mountLabel string) string {
	return src
}
++++
// SetProcessLabel is a no-op in builds without SELinux support.
func SetProcessLabel(processLabel string) error {
	return nil
}

// GetProcessLabel is a no-op in builds without SELinux support. It is
// provided so the package exports the same API under both build tags
// (the SELinux build defines GetProcessLabel; this stub file previously
// did not, breaking non-selinux compilation of callers).
func GetProcessLabel() (string, error) {
	return "", nil
}
++++
// SetFileLabel is a no-op in builds without SELinux support.
func SetFileLabel(path string, fileLabel string) error {
	return nil
}
++++
// Relabel is a no-op in builds without SELinux support.
func Relabel(path string, fileLabel string, relabel string) error {
	return nil
}
++++
// GetPidLabel is a no-op in builds without SELinux support; it always
// returns an empty label.
func GetPidLabel(pid int) (string, error) {
	return "", nil
}
++++
// Init is a no-op in builds without SELinux support.
func Init() {
}
++++
// ReserveLabel is a no-op in builds without SELinux support.
func ReserveLabel(label string) error {
	return nil
}
++++
// UnreserveLabel is a no-op in builds without SELinux support.
func UnreserveLabel(label string) error {
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build selinux,linux
++++
++++package label
++++
++++import (
++++ "fmt"
++++ "strings"
++++
++++ "github.com/docker/libcontainer/selinux"
++++)
++++
++++// InitLabels returns the process label and file labels to be used within
++++// the container. A list of options can be passed into this function to alter
++++// the labels. The labels returned will include a random MCS String, that is
++++// guaranteed to be unique.
++++func InitLabels(options []string) (string, string, error) {
++++ if !selinux.SelinuxEnabled() {
++++ return "", "", nil
++++ }
++++ var err error
++++ processLabel, mountLabel := selinux.GetLxcContexts()
++++ if processLabel != "" {
++++ pcon := selinux.NewContext(processLabel)
++++ mcon := selinux.NewContext(mountLabel)
++++ for _, opt := range options {
++++ if opt == "disable" {
++++ return "", "", nil
++++ }
++++ if i := strings.Index(opt, ":"); i == -1 {
++++ return "", "", fmt.Errorf("Bad SELinux Option")
++++ }
++++ con := strings.SplitN(opt, ":", 2)
++++ pcon[con[0]] = con[1]
++++ if con[0] == "level" || con[0] == "user" {
++++ mcon[con[0]] = con[1]
++++ }
++++ }
++++ processLabel = pcon.Get()
++++ mountLabel = mcon.Get()
++++ }
++++ return processLabel, mountLabel, err
++++}
++++
// DEPRECATED: The GenLabels function is only to be used during the transition to the official API.
// It splits the space-separated options string and delegates to InitLabels.
func GenLabels(options string) (string, string, error) {
	return InitLabels(strings.Fields(options))
}
++++
// FormatMountLabel returns a string to be used by the mount command.
// The format of this string will be used to alter the labeling of the mountpoint.
// The string returned is suitable to be used as the options field of the mount command.
// If you need to have additional mount point options, you can pass them in as
// the first parameter. Second parameter is the label that you wish to apply
// to all content in the mount point.
func FormatMountLabel(src, mountLabel string) string {
	if mountLabel == "" {
		return src
	}
	if src == "" {
		return fmt.Sprintf("context=%q", mountLabel)
	}
	return fmt.Sprintf("%s,context=%q", src, mountLabel)
}
++++
// SetProcessLabel takes a process label and tells the kernel to assign the
// label to the next program executed by the current process. An empty
// label is a no-op.
func SetProcessLabel(processLabel string) error {
	if processLabel == "" {
		return nil
	}
	return selinux.Setexeccon(processLabel)
}
++++
// GetProcessLabel returns the process label that the kernel will assign
// to the next program executed by the current process. If "" is returned
// this indicates that the default labeling will happen for the process.
func GetProcessLabel() (string, error) {
	return selinux.Getexeccon()
}
++++
// SetFileLabel modifies the "path" label to the specified file label.
// It is a no-op when SELinux is disabled or fileLabel is empty.
func SetFileLabel(path string, fileLabel string) error {
	if selinux.SelinuxEnabled() && fileLabel != "" {
		return selinux.Setfilecon(path, fileLabel)
	}
	return nil
}
++++
// Relabel changes the label of path to the fileLabel string. If the relabel
// string is "z", relabel will change the MCS label to s0. This will allow all
// containers to share the content. If the relabel string is a "Z" then
// the MCS label should continue to be used. SELinux will use this field
// to make sure the content can not be shared by other containers.
func Relabel(path string, fileLabel string, relabel string) error {
	if fileLabel == "" {
		return nil
	}
	if relabel == "z" {
		c := selinux.NewContext(fileLabel)
		c["level"] = "s0"
		fileLabel = c.Get()
	}
	// Recursive chcon over path with the (possibly downgraded) label.
	return selinux.Chcon(path, fileLabel, true)
}
++++
// GetPidLabel will return the label of the process running with the specified pid
func GetPidLabel(pid int) (string, error) {
	return selinux.Getpidcon(pid)
}
++++
// Init initialises the labeling system by probing whether SELinux is
// enabled (the result of the probe is discarded here).
func Init() {
	selinux.SelinuxEnabled()
}
++++
// ReserveLabel will record the fact that the MCS label has already been used.
// This will prevent InitLabels from using the MCS label in a newly created
// container
func ReserveLabel(label string) error {
	selinux.ReserveLabel(label)
	return nil
}
++++
// UnreserveLabel will remove the reservation of the MCS label.
// This will allow InitLabels to use the MCS label in a newly created
// containers
func UnreserveLabel(label string) error {
	selinux.FreeLxcContexts(label)
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build selinux,linux
++++
++++package label
++++
++++import (
++++ "testing"
++++
++++ "github.com/docker/libcontainer/selinux"
++++)
++++
// TestInit exercises InitLabels on an SELinux-enabled host: default
// (nil) options, the "disable" option, explicit user/role/type/level
// overrides, and rejection of a malformed (colon-less) option. The test
// silently does nothing when SELinux is not enabled.
func TestInit(t *testing.T) {
	if selinux.SelinuxEnabled() {
		var testNull []string
		plabel, mlabel, err := InitLabels(testNull)
		if err != nil {
			t.Log("InitLabels Failed")
			t.Fatal(err)
		}
		testDisabled := []string{"disable"}
		plabel, mlabel, err = InitLabels(testDisabled)
		if err != nil {
			t.Log("InitLabels Disabled Failed")
			t.Fatal(err)
		}
		if plabel != "" {
			t.Log("InitLabels Disabled Failed")
			t.Fatal()
		}
		testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"}
		plabel, mlabel, err = InitLabels(testUser)
		if err != nil {
			t.Log("InitLabels User Failed")
			t.Fatal(err)
		}
		// Both labels must reflect the user/level overrides.
		if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" {
			t.Log("InitLabels User Failed")
			t.Log(plabel, mlabel)
			t.Fatal(err)
		}

		// "user" has no colon separator, so InitLabels must reject it.
		testBadData := []string{"user", "role:user_r", "type:user_t", "level:s0:c1,c15"}
		plabel, mlabel, err = InitLabels(testBadData)
		if err == nil {
			t.Log("InitLabels Bad Failed")
			t.Fatal(err)
		}
	}
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package mount
++++
++++import (
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer/label"
++++ "github.com/docker/libcontainer/mount/nodes"
++++)
++++
// default mount point flags
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV

// mount describes one system mount performed inside the new mount
// namespace: source, target path, filesystem type ("device"), mount(2)
// flags, and the fs-specific data string.
type mount struct {
	source string
	path   string
	device string
	flags  int
	data   string
}
++++
++++// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a
++++// new mount namespace.
++++func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountConfig *MountConfig) error {
++++ var (
++++ err error
++++ flag = syscall.MS_PRIVATE
++++ )
++++
++++ if mountConfig.NoPivotRoot {
++++ flag = syscall.MS_SLAVE
++++ }
++++
++++ if err := syscall.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
++++ return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err)
++++ }
++++
++++ if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
++++ return fmt.Errorf("mouting %s as bind %s", rootfs, err)
++++ }
++++
++++ if err := mountSystem(rootfs, sysReadonly, mountConfig); err != nil {
++++ return fmt.Errorf("mount system %s", err)
++++ }
++++
++++ // apply any user specified mounts within the new mount namespace
++++ for _, m := range mountConfig.Mounts {
++++ if err := m.Mount(rootfs, mountConfig.MountLabel); err != nil {
++++ return err
++++ }
++++ }
++++
++++ if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil {
++++ return fmt.Errorf("create device nodes %s", err)
++++ }
++++
++++ if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil {
++++ return err
++++ }
++++
++++ // stdin, stdout and stderr could be pointing to /dev/null from parent namespace.
++++ // Re-open them inside this namespace.
++++ if err := reOpenDevNull(rootfs); err != nil {
++++ return fmt.Errorf("Failed to reopen /dev/null %s", err)
++++ }
++++
++++ if err := setupDevSymlinks(rootfs); err != nil {
++++ return fmt.Errorf("dev symlinks %s", err)
++++ }
++++
++++ if err := syscall.Chdir(rootfs); err != nil {
++++ return fmt.Errorf("chdir into %s %s", rootfs, err)
++++ }
++++
++++ if mountConfig.NoPivotRoot {
++++ err = MsMoveRoot(rootfs)
++++ } else {
++++ err = PivotRoot(rootfs)
++++ }
++++
++++ if err != nil {
++++ return err
++++ }
++++
++++ if mountConfig.ReadonlyFs {
++++ if err := SetReadonly(); err != nil {
++++ return fmt.Errorf("set readonly %s", err)
++++ }
++++ }
++++
++++ syscall.Umask(0022)
++++
++++ return nil
++++}
++++
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
// inside the mount namespace, creating each target directory as needed.
func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error {
	for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly) {
		if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
			return fmt.Errorf("mkdirall %s %s", m.path, err)
		}
		if err := syscall.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
			return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
		}
	}
	return nil
}
++++
++++func createIfNotExists(path string, isDir bool) error {
++++ if _, err := os.Stat(path); err != nil {
++++ if os.IsNotExist(err) {
++++ if isDir {
++++ if err := os.MkdirAll(path, 0755); err != nil {
++++ return err
++++ }
++++ } else {
++++ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
++++ return err
++++ }
++++ f, err := os.OpenFile(path, os.O_CREATE, 0755)
++++ if err != nil {
++++ return err
++++ }
++++ f.Close()
++++ }
++++ }
++++ }
++++ return nil
++++}
++++
++++func setupDevSymlinks(rootfs string) error {
++++ var links = [][2]string{
++++ {"/proc/self/fd", "/dev/fd"},
++++ {"/proc/self/fd/0", "/dev/stdin"},
++++ {"/proc/self/fd/1", "/dev/stdout"},
++++ {"/proc/self/fd/2", "/dev/stderr"},
++++ }
++++
++++ // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
++++ // in /dev if it exists in /proc.
++++ if _, err := os.Stat("/proc/kcore"); err == nil {
++++ links = append(links, [2]string{"/proc/kcore", "/dev/kcore"})
++++ }
++++
++++ for _, link := range links {
++++ var (
++++ src = link[0]
++++ dst = filepath.Join(rootfs, link[1])
++++ )
++++
++++ if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) {
++++ return fmt.Errorf("symlink %s %s %s", src, dst, err)
++++ }
++++ }
++++
++++ return nil
++++}
++++
// TODO: this is crappy right now and should be cleaned up with a better way of handling system and
// standard bind mounts allowing them to be more dynamic
//
// newSystemMounts returns, in mount order, the proc, /dev tmpfs, shm,
// devpts and sysfs mounts for rootfs; sysfs is read-only when
// sysReadonly is set. The slice order matters: /dev must be mounted
// before dev/shm and dev/pts, which live under it.
func newSystemMounts(rootfs, mountLabel string, sysReadonly bool) []mount {
	systemMounts := []mount{
		{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
		{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)},
		{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)},
		{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
	}

	sysMountFlags := defaultMountFlags
	if sysReadonly {
		sysMountFlags |= syscall.MS_RDONLY
	}

	systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: sysMountFlags})

	return systemMounts
}
++++
// If stdin, stdout or stderr were to be pointing to '/dev/null',
// this method will make them point to '/dev/null' from within this namespace.
func reOpenDevNull(rootfs string) error {
	var stat, devNullStat syscall.Stat_t
	file, err := os.Open(filepath.Join(rootfs, "/dev/null"))
	if err != nil {
		return fmt.Errorf("Failed to open /dev/null - %s", err)
	}
	defer file.Close()
	if err = syscall.Fstat(int(file.Fd()), &devNullStat); err != nil {
		return fmt.Errorf("Failed to stat /dev/null - %s", err)
	}
	// Compare each standard fd's device number against the container's
	// /dev/null; matching fds are re-pointed at it via dup2.
	for fd := 0; fd < 3; fd++ {
		if err = syscall.Fstat(fd, &stat); err != nil {
			return fmt.Errorf("Failed to stat fd %d - %s", fd, err)
		}
		if stat.Rdev == devNullStat.Rdev {
			// Close and re-open the fd.
			if err = syscall.Dup2(int(file.Fd()), fd); err != nil {
				return fmt.Errorf("Failed to dup fd %d to fd %d - %s", file.Fd(), fd, err)
			}
		}
	}
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package mount
++++
++++import (
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++
++++ "github.com/docker/docker/pkg/symlink"
++++ "github.com/docker/libcontainer/label"
++++)
++++
// Mount describes a user-specified mount to perform inside a
// container's rootfs.
type Mount struct {
	Type        string `json:"type,omitempty"`        // "bind" or "tmpfs" (see Mount.Mount)
	Source      string `json:"source,omitempty"`      // Source path, in the host namespace
	Destination string `json:"destination,omitempty"` // Destination path, in the container
	Writable    bool   `json:"writable,omitempty"`
	Relabel     string `json:"relabel,omitempty"` // Relabel source if set, "z" indicates shared, "Z" indicates unshared
	Private     bool   `json:"private,omitempty"`
	Slave       bool   `json:"slave,omitempty"`
}
++++
++++func (m *Mount) Mount(rootfs, mountLabel string) error {
++++ switch m.Type {
++++ case "bind":
++++ return m.bindMount(rootfs, mountLabel)
++++ case "tmpfs":
++++ return m.tmpfsMount(rootfs, mountLabel)
++++ default:
++++ return fmt.Errorf("unsupported mount type %s for %s", m.Type, m.Destination)
++++ }
++++}
++++
// bindMount bind-mounts m.Source onto m.Destination (resolved inside
// rootfs), honoring the Writable, Slave, Relabel, and Private options.
func (m *Mount) bindMount(rootfs, mountLabel string) error {
	var (
		flags = syscall.MS_BIND | syscall.MS_REC
		dest  = filepath.Join(rootfs, m.Destination)
	)

	if !m.Writable {
		flags = flags | syscall.MS_RDONLY
	}

	if m.Slave {
		flags = flags | syscall.MS_SLAVE
	}

	stat, err := os.Stat(m.Source)
	if err != nil {
		return err
	}

	// FIXME: (crosbymichael) This does not belong here and should be done a layer above
	// Resolve symlinks relative to rootfs so the target cannot escape it.
	dest, err = symlink.FollowSymlinkInScope(dest, rootfs)
	if err != nil {
		return err
	}

	if err := createIfNotExists(dest, stat.IsDir()); err != nil {
		return fmt.Errorf("creating new bind mount target %s", err)
	}

	if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
		return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
	}

	if !m.Writable {
		// Re-mount to apply MS_RDONLY (bind mounts need a remount for the
		// read-only flag to take effect — see mount(2)).
		if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
			return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
		}
	}

	if m.Relabel != "" {
		if err := label.Relabel(m.Source, mountLabel, m.Relabel); err != nil {
			return fmt.Errorf("relabeling %s to %s %s", m.Source, mountLabel, err)
		}
	}

	if m.Private {
		if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
			return fmt.Errorf("mounting %s private %s", dest, err)
		}
	}

	return nil
}
++++
// tmpfsMount mounts a fresh tmpfs at m.Destination (resolved inside
// rootfs) using the default mount flags and the SELinux mount label.
func (m *Mount) tmpfsMount(rootfs, mountLabel string) error {
	var (
		err  error
		l    = label.FormatMountLabel("", mountLabel)
		dest = filepath.Join(rootfs, m.Destination)
	)

	// FIXME: (crosbymichael) This does not belong here and should be done a layer above
	if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil {
		return err
	}

	if err := createIfNotExists(dest, true); err != nil {
		return fmt.Errorf("creating new tmpfs mount target %s", err)
	}

	if err := syscall.Mount("tmpfs", dest, "tmpfs", uintptr(defaultMountFlags), l); err != nil {
		return fmt.Errorf("%s mounting %s in tmpfs", err, dest)
	}

	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package mount
++++
++++import (
++++ "errors"
++++
++++ "github.com/docker/libcontainer/devices"
++++)
++++
// ErrUnsupported indicates a method that is not implemented for the
// current platform.
var ErrUnsupported = errors.New("Unsupported method")

// MountConfig describes the rootfs behavior and the additional mounts
// and device nodes to set up inside a container's mount namespace.
type MountConfig struct {
	// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
	// This is a common option when the container is running in ramdisk
	NoPivotRoot bool `json:"no_pivot_root,omitempty"`

	// ReadonlyFs will remount the container's rootfs as readonly where only externally mounted
	// bind mounts are writable
	ReadonlyFs bool `json:"readonly_fs,omitempty"`

	// Mounts specify additional source and destination paths that will be mounted inside the container's
	// rootfs and mount namespace if specified
	Mounts []*Mount `json:"mounts,omitempty"`

	// The device nodes that should be automatically created within the container upon container start.
	// Note, make sure that the node is marked as allowed in the cgroup as well!
	DeviceNodes []*devices.Device `json:"device_nodes,omitempty"`

	// MountLabel is the SELinux label applied to the system and user mounts.
	MountLabel string `json:"mount_label,omitempty"`
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package mount
++++
++++import (
++++ "fmt"
++++ "syscall"
++++)
++++
// MsMoveRoot moves the rootfs mount over / with MS_MOVE and chroots into
// it. Used instead of pivot_root when NoPivotRoot is set (e.g. when the
// container runs from a ramdisk — see MountConfig.NoPivotRoot).
func MsMoveRoot(rootfs string) error {
	if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
		return fmt.Errorf("mount move %s into / %s", rootfs, err)
	}

	if err := syscall.Chroot("."); err != nil {
		return fmt.Errorf("chroot . %s", err)
	}

	return syscall.Chdir("/")
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package nodes
++++
++++import (
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer/devices"
++++)
++++
// Create the device nodes in the container. The umask is cleared for the
// duration so each node receives exactly its configured permission bits.
func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
	oldMask := syscall.Umask(0000)
	defer syscall.Umask(oldMask)

	for _, node := range nodesToCreate {
		if err := CreateDeviceNode(rootfs, node); err != nil {
			return err
		}
	}
	return nil
}
++++
++++// Creates the device node in the rootfs of the container.
++++func CreateDeviceNode(rootfs string, node *devices.Device) error {
++++ var (
++++ dest = filepath.Join(rootfs, node.Path)
++++ parent = filepath.Dir(dest)
++++ )
++++
++++ if err := os.MkdirAll(parent, 0755); err != nil {
++++ return err
++++ }
++++
++++ fileMode := node.FileMode
++++ switch node.Type {
++++ case 'c':
++++ fileMode |= syscall.S_IFCHR
++++ case 'b':
++++ fileMode |= syscall.S_IFBLK
++++ default:
++++ return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
++++ }
++++
++++ if err := syscall.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) {
++++ return fmt.Errorf("mknod %s %s", node.Path, err)
++++ }
++++
++++ if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil {
++++ return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid)
++++ }
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !linux
++++
++++package nodes
++++
++++import (
++++ "errors"
++++
++++ "github.com/docker/libcontainer/devices"
++++)
++++
// CreateDeviceNodes is not supported on non-linux platforms and always
// returns an error.
func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
	return errors.New("Unsupported method")
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package mount
++++
++++import (
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "syscall"
++++)
++++
// PivotRoot switches the root filesystem to rootfs using pivot_root(2).
// A temporary directory created inside rootfs holds the old root, which
// is then lazily unmounted and removed.
func PivotRoot(rootfs string) error {
	pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root")
	if err != nil {
		return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
	}

	if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
		// pivot_root failed, so pivotDir's original path is still valid:
		// remove it rather than leaking an empty dir inside rootfs.
		os.Remove(pivotDir)
		return fmt.Errorf("pivot_root %s", err)
	}

	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("chdir / %s", err)
	}

	// path to pivot dir now changed, update
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))

	// The old root is still mounted on pivotDir; detach it lazily, then
	// remove the now-empty directory.
	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("unmount pivot_root dir %s", err)
	}

	return os.Remove(pivotDir)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package mount
++++
++++import (
++++ "fmt"
++++ "os"
++++ "path/filepath"
++++
++++ "github.com/docker/libcontainer/console"
++++)
++++
++++func SetupPtmx(rootfs, consolePath, mountLabel string) error {
++++ ptmx := filepath.Join(rootfs, "dev/ptmx")
++++ if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
++++ return err
++++ }
++++
++++ if err := os.Symlink("pts/ptmx", ptmx); err != nil {
++++ return fmt.Errorf("symlink dev ptmx %s", err)
++++ }
++++
++++ if consolePath != "" {
++++ if err := console.Setup(rootfs, consolePath, mountLabel); err != nil {
++++ return err
++++ }
++++ }
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package mount
++++
++++import (
++++ "syscall"
++++)
++++
// SetReadonly recursively remounts the current root ("/") read-only as
// a bind remount.
func SetReadonly() error {
	return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package mount
++++
++++import "syscall"
++++
// RemountProc lazily detaches the existing /proc mount and mounts a
// fresh proc filesystem with the default mount flags.
func RemountProc() error {
	if err := syscall.Unmount("/proc", syscall.MNT_DETACH); err != nil {
		return err
	}

	if err := syscall.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
		return err
	}

	return nil
}
++++
// RemountSys lazily detaches the existing /sys mount and mounts a fresh
// sysfs in its place. An EINVAL from the unmount (typically: /sys is not
// a mount point) is treated as "nothing to remount" and the mount step
// is skipped.
func RemountSys() error {
	if err := syscall.Unmount("/sys", syscall.MNT_DETACH); err != nil {
		if err != syscall.EINVAL {
			return err
		}
	} else {
		if err := syscall.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
			return err
		}
	}

	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package namespaces
++++
++++import (
++++ "os"
++++ "os/exec"
++++
++++ "github.com/docker/libcontainer"
++++)
++++
// CreateCommand builds the *exec.Cmd used to launch a container's init
// process. It receives the container config, console path, state/data
// path, the path of the init binary, the child end of the sync pipe,
// and the user's command arguments.
type CreateCommand func(container *libcontainer.Config, console, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package namespaces
++++
++++import (
++++ "io"
++++ "os"
++++ "os/exec"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer"
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/cgroups/fs"
++++ "github.com/docker/libcontainer/cgroups/systemd"
++++ "github.com/docker/libcontainer/network"
++++ "github.com/docker/libcontainer/syncpipe"
++++ "github.com/docker/libcontainer/system"
++++)
++++
// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
// Move this to libcontainer package.
// Exec performs setup outside of a namespace so that a container can be
// executed. Exec is a high level function for working with container namespaces.
// It returns the container init's exit status, or -1 together with an
// error when any stage of the setup fails (killing and reaping the
// child process on the failure paths).
func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
	var (
		err error
	)

	// create a pipe so that we can synchronize with the namespaced process and
	// pass the veth name to the child
	syncPipe, err := syncpipe.NewSyncPipe()
	if err != nil {
		return -1, err
	}
	defer syncPipe.Close()

	command := createCommand(container, console, dataPath, os.Args[0], syncPipe.Child(), args)
	// Note: these are only used in non-tty mode
	// if there is a tty for the container it will be opened within the namespace and the
	// fds will be duped to stdin, stdout, and stderr
	command.Stdin = stdin
	command.Stdout = stdout
	command.Stderr = stderr

	if err := command.Start(); err != nil {
		return -1, err
	}

	// Now we passed the pipe to the child, close our side
	syncPipe.CloseChild()

	// Record init's start time (presumably used later to validate the
	// pid — it is persisted below as State.InitStartTime).
	started, err := system.GetProcessStartTime(command.Process.Pid)
	if err != nil {
		return -1, err
	}

	// Do this before syncing with child so that no children
	// can escape the cgroup
	cgroupRef, err := SetupCgroups(container, command.Process.Pid)
	if err != nil {
		command.Process.Kill()
		command.Wait()
		return -1, err
	}
	defer cgroupRef.Cleanup()

	cgroupPaths, err := cgroupRef.Paths()
	if err != nil {
		command.Process.Kill()
		command.Wait()
		return -1, err
	}

	var networkState network.NetworkState
	if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil {
		command.Process.Kill()
		command.Wait()
		return -1, err
	}

	state := &libcontainer.State{
		InitPid:       command.Process.Pid,
		InitStartTime: started,
		NetworkState:  networkState,
		CgroupPaths:   cgroupPaths,
	}

	if err := libcontainer.SaveState(dataPath, state); err != nil {
		command.Process.Kill()
		command.Wait()
		return -1, err
	}
	defer libcontainer.DeleteState(dataPath)

	// Sync with child
	if err := syncPipe.ReadFromChild(); err != nil {
		command.Process.Kill()
		command.Wait()
		return -1, err
	}

	if startCallback != nil {
		startCallback()
	}

	// A non-zero exit from the container surfaces as an ExitError; it is
	// reported through the exit status below rather than as an error.
	if err := command.Wait(); err != nil {
		if _, ok := err.(*exec.ExitError); !ok {
			return -1, err
		}
	}

	return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
}
++++
// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces
// defined on the container's configuration and use the current binary as the init with the
// args provided
//
// console: the /dev/console to setup inside the container
// init: the program executed inside the namespaces
// root: the path to the container json file and information
// pipe: sync pipe to synchronize the parent and child processes
// args: the arguments to pass to the container to run as the user's program
func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
	// get our binary name from arg0 so we can always reexec ourself
	env := []string{
		"console=" + console,
		"pipe=3",
		"data_path=" + dataPath,
	}

	/*
	   TODO: move user and wd into env
	   if user != "" {
	   	env = append(env, "user="+user)
	   }
	   if workingDir != "" {
	   	env = append(env, "wd="+workingDir)
	   }
	*/

	command := exec.Command(init, append([]string{"init", "--"}, args...)...)
	// make sure the process is executed inside the context of the rootfs
	command.Dir = container.RootFs
	command.Env = append(os.Environ(), env...)

	if command.SysProcAttr == nil {
		command.SysProcAttr = &syscall.SysProcAttr{}
	}
	command.SysProcAttr.Cloneflags = uintptr(GetNamespaceFlags(container.Namespaces))

	// Kill the child if this process dies, and hand the child the sync
	// pipe as fd 3 (the first ExtraFiles entry — matching "pipe=3" above).
	command.SysProcAttr.Pdeathsig = syscall.SIGKILL
	command.ExtraFiles = []*os.File{pipe}

	return command
}
++++
++++// SetupCgroups applies the cgroup restrictions to the process running in the container based
++++// on the container's configuration
++++func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
++++ if container.Cgroups != nil {
++++ c := container.Cgroups
++++
++++ if systemd.UseSystemd() {
++++ return systemd.Apply(c, nspid)
++++ }
++++
++++ return fs.Apply(c, nspid)
++++ }
++++
++++ return nil, nil
++++}
++++
// InitializeNetworking creates the container's network stack outside of the namespace and moves
// interfaces into the container's net namespaces if necessary, then sends
// the resulting network state to the child over the sync pipe.
func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error {
	for _, config := range container.Networks {
		strategy, err := network.GetStrategy(config.Type)
		if err != nil {
			return err
		}
		if err := strategy.Create((*network.Network)(config), nspid, networkState); err != nil {
			return err
		}
	}
	return pipe.SendToChild(networkState)
}
++++
++++// GetNamespaceFlags parses the container's Namespaces options to set the correct
++++// flags on clone, unshare, and setns
++++func GetNamespaceFlags(namespaces map[string]bool) (flag int) {
++++ for key, enabled := range namespaces {
++++ if enabled {
++++ if ns := GetNamespace(key); ns != nil {
++++ flag |= ns.Value
++++ }
++++ }
++++ }
++++ return flag
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package namespaces
++++
++++import (
++++ "fmt"
++++ "io"
++++ "os"
++++ "os/exec"
++++ "path/filepath"
++++ "strconv"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer"
++++ "github.com/docker/libcontainer/apparmor"
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/label"
++++ "github.com/docker/libcontainer/syncpipe"
++++ "github.com/docker/libcontainer/system"
++++)
++++
++++// ExecIn reexec's the initPath with the argv 0 rewrite to "nsenter" so that it is able to run the
++++// setns code in a single threaded environment joining the existing containers' namespaces.
++++func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs []string, initPath, action string,
++++ stdin io.Reader, stdout, stderr io.Writer, console string, startCallback func(*exec.Cmd)) (int, error) {
++++
++++ args := []string{fmt.Sprintf("nsenter-%s", action), "--nspid", strconv.Itoa(state.InitPid)}
++++
++++ if console != "" {
++++ args = append(args, "--console", console)
++++ }
++++
++++ cmd := &exec.Cmd{
++++ Path: initPath,
++++ Args: append(args, append([]string{"--"}, userArgs...)...),
++++ }
++++
++++ if filepath.Base(initPath) == initPath {
++++ if lp, err := exec.LookPath(initPath); err == nil {
++++ cmd.Path = lp
++++ }
++++ }
++++
++++ pipe, err := syncpipe.NewSyncPipe()
++++ if err != nil {
++++ return -1, err
++++ }
++++ defer pipe.Close()
++++
++++ // Note: these are only used in non-tty mode
++++ // if there is a tty for the container it will be opened within the namespace and the
++++ // fds will be duped to stdin, stdiout, and stderr
++++ cmd.Stdin = stdin
++++ cmd.Stdout = stdout
++++ cmd.Stderr = stderr
++++
++++ cmd.ExtraFiles = []*os.File{pipe.Child()}
++++
++++ if err := cmd.Start(); err != nil {
++++ return -1, err
++++ }
++++ pipe.CloseChild()
++++
++++ // Enter cgroups.
++++ if err := EnterCgroups(state, cmd.Process.Pid); err != nil {
++++ return -1, err
++++ }
++++
++++ if err := pipe.SendToChild(container); err != nil {
++++ cmd.Process.Kill()
++++ cmd.Wait()
++++ return -1, err
++++ }
++++
++++ if startCallback != nil {
++++ startCallback(cmd)
++++ }
++++
++++ if err := cmd.Wait(); err != nil {
++++ if _, ok := err.(*exec.ExitError); !ok {
++++ return -1, err
++++ }
++++ }
++++
++++ return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
++++}
++++
++++// Finalize expects that the setns calls have been setup and that is has joined an
++++// existing namespace
++++func FinalizeSetns(container *libcontainer.Config, args []string) error {
++++ // clear the current processes env and replace it with the environment defined on the container
++++ if err := LoadContainerEnvironment(container); err != nil {
++++ return err
++++ }
++++
++++ if err := FinalizeNamespace(container); err != nil {
++++ return err
++++ }
++++
++++ if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
++++ return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
++++ }
++++
++++ if container.ProcessLabel != "" {
++++ if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
++++ return err
++++ }
++++ }
++++
++++ if err := system.Execv(args[0], args[0:], container.Env); err != nil {
++++ return err
++++ }
++++
++++ panic("unreachable")
++++}
++++
++++func EnterCgroups(state *libcontainer.State, pid int) error {
++++ return cgroups.EnterPid(state.CgroupPaths, pid)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package namespaces
++++
++++import (
++++ "fmt"
++++ "os"
++++ "strings"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer"
++++ "github.com/docker/libcontainer/apparmor"
++++ "github.com/docker/libcontainer/console"
++++ "github.com/docker/libcontainer/label"
++++ "github.com/docker/libcontainer/mount"
++++ "github.com/docker/libcontainer/netlink"
++++ "github.com/docker/libcontainer/network"
++++ "github.com/docker/libcontainer/security/capabilities"
++++ "github.com/docker/libcontainer/security/restrict"
++++ "github.com/docker/libcontainer/syncpipe"
++++ "github.com/docker/libcontainer/system"
++++ "github.com/docker/libcontainer/user"
++++ "github.com/docker/libcontainer/utils"
++++)
++++
// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
// Move this to libcontainer package.
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container.
// The caller of Init function has to ensure that the go runtime is locked to an OS thread
// (using runtime.LockOSThread) else system calls like setns called within Init may not work as intended.
//
// Any error is reported back to the parent over the sync pipe before
// returning. On success this function does not return: it execs args[0].
func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) {
	// The named return err lets this deferred report cover every failure
	// path below, including the fmt.Errorf-wrapped ones.
	defer func() {
		if err != nil {
			syncPipe.ReportChildError(err)
		}
	}()

	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
	if err != nil {
		return err
	}

	// clear the current processes env and replace it with the environment
	// defined on the container
	if err := LoadContainerEnvironment(container); err != nil {
		return err
	}

	// We always read this as it is a way to sync with the parent as well
	var networkState *network.NetworkState
	if err := syncPipe.ReadFromParent(&networkState); err != nil {
		return err
	}

	if consolePath != "" {
		if err := console.OpenAndDup(consolePath); err != nil {
			return err
		}
	}
	// Become a session leader so the console can become our controlling tty.
	if _, err := syscall.Setsid(); err != nil {
		return fmt.Errorf("setsid %s", err)
	}
	if consolePath != "" {
		if err := system.Setctty(); err != nil {
			return fmt.Errorf("setctty %s", err)
		}
	}
	// Networking and routes are configured before the mount namespace is
	// initialized; the interfaces were created by the parent (see
	// InitializeNetworking) and arrive via networkState.
	if err := setupNetwork(container, networkState); err != nil {
		return fmt.Errorf("setup networking %s", err)
	}
	if err := setupRoute(container); err != nil {
		return fmt.Errorf("setup route %s", err)
	}

	label.Init()

	// Mount setup (and the pivot into rootfs) must happen before the
	// hostname, apparmor, and capability changes below.
	if err := mount.InitializeMountNamespace(rootfs,
		consolePath,
		container.RestrictSys,
		(*mount.MountConfig)(container.MountConfig)); err != nil {
		return fmt.Errorf("setup mount namespace %s", err)
	}

	if container.Hostname != "" {
		if err := syscall.Sethostname([]byte(container.Hostname)); err != nil {
			return fmt.Errorf("sethostname %s", err)
		}
	}

	if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
		return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
	}

	if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
		return fmt.Errorf("set process label %s", err)
	}

	// TODO: (crosbymichael) make this configurable at the Config level
	if container.RestrictSys {
		if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus"); err != nil {
			return err
		}
	}

	pdeathSignal, err := system.GetParentDeathSignal()
	if err != nil {
		return fmt.Errorf("get parent death signal %s", err)
	}

	if err := FinalizeNamespace(container); err != nil {
		return fmt.Errorf("finalize namespace %s", err)
	}

	// FinalizeNamespace can change user/group which clears the parent death
	// signal, so we restore it here.
	if err := RestoreParentDeathSignal(pdeathSignal); err != nil {
		return fmt.Errorf("restore parent death signal %s", err)
	}

	return system.Execv(args[0], args[0:], os.Environ())
}
++++
++++// RestoreParentDeathSignal sets the parent death signal to old.
++++func RestoreParentDeathSignal(old int) error {
++++ if old == 0 {
++++ return nil
++++ }
++++
++++ current, err := system.GetParentDeathSignal()
++++ if err != nil {
++++ return fmt.Errorf("get parent death signal %s", err)
++++ }
++++
++++ if old == current {
++++ return nil
++++ }
++++
++++ if err := system.ParentDeathSignal(uintptr(old)); err != nil {
++++ return fmt.Errorf("set parent death signal %s", err)
++++ }
++++
++++ // Signal self if parent is already dead. Does nothing if running in a new
++++ // PID namespace, as Getppid will always return 0.
++++ if syscall.Getppid() == 1 {
++++ return syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
++++ }
++++
++++ return nil
++++}
++++
++++// SetupUser changes the groups, gid, and uid for the user inside the container
++++func SetupUser(u string) error {
++++ uid, gid, suppGids, home, err := user.GetUserGroupSupplementaryHome(u, syscall.Getuid(), syscall.Getgid(), "/")
++++ if err != nil {
++++ return fmt.Errorf("get supplementary groups %s", err)
++++ }
++++
++++ if err := syscall.Setgroups(suppGids); err != nil {
++++ return fmt.Errorf("setgroups %s", err)
++++ }
++++
++++ if err := syscall.Setgid(gid); err != nil {
++++ return fmt.Errorf("setgid %s", err)
++++ }
++++
++++ if err := syscall.Setuid(uid); err != nil {
++++ return fmt.Errorf("setuid %s", err)
++++ }
++++
++++ // if we didn't get HOME already, set it based on the user's HOME
++++ if envHome := os.Getenv("HOME"); envHome == "" {
++++ if err := os.Setenv("HOME", home); err != nil {
++++ return fmt.Errorf("set HOME %s", err)
++++ }
++++ }
++++
++++ return nil
++++}
++++
++++// setupVethNetwork uses the Network config if it is not nil to initialize
++++// the new veth interface inside the container for use by changing the name to eth0
++++// setting the MTU and IP address along with the default gateway
++++func setupNetwork(container *libcontainer.Config, networkState *network.NetworkState) error {
++++ for _, config := range container.Networks {
++++ strategy, err := network.GetStrategy(config.Type)
++++ if err != nil {
++++ return err
++++ }
++++
++++ err1 := strategy.Initialize((*network.Network)(config), networkState)
++++ if err1 != nil {
++++ return err1
++++ }
++++ }
++++ return nil
++++}
++++
++++func setupRoute(container *libcontainer.Config) error {
++++ for _, config := range container.Routes {
++++ if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil {
++++ return err
++++ }
++++ }
++++ return nil
++++}
++++
// FinalizeNamespace drops the caps, sets the correct user
// and working dir, and closes any leaky file descriptors
// before execing the command inside the namespace
//
// The sequence is order-critical: keep-caps brackets the user change so
// capabilities survive Setuid, and the final capability drop happens only
// after the user switch.
func FinalizeNamespace(container *libcontainer.Config) error {
	// Ensure that all non-standard fds we may have accidentally
	// inherited are marked close-on-exec so they stay out of the
	// container
	if err := utils.CloseExecFrom(3); err != nil {
		return fmt.Errorf("close open file descriptors %s", err)
	}

	// drop capabilities in bounding set before changing user
	if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
		return fmt.Errorf("drop bounding set %s", err)
	}

	// preserve existing capabilities while we change users
	if err := system.SetKeepCaps(); err != nil {
		return fmt.Errorf("set keep caps %s", err)
	}

	if err := SetupUser(container.User); err != nil {
		return fmt.Errorf("setup user %s", err)
	}

	if err := system.ClearKeepCaps(); err != nil {
		return fmt.Errorf("clear keep caps %s", err)
	}

	// drop all other capabilities
	if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
		return fmt.Errorf("drop capabilities %s", err)
	}

	// Change into the configured working directory last, inside the new
	// rootfs and as the target user.
	if container.WorkingDir != "" {
		if err := syscall.Chdir(container.WorkingDir); err != nil {
			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
		}
	}

	return nil
}
++++
++++func LoadContainerEnvironment(container *libcontainer.Config) error {
++++ os.Clearenv()
++++ for _, pair := range container.Env {
++++ p := strings.SplitN(pair, "=", 2)
++++ if len(p) < 2 {
++++ return fmt.Errorf("invalid environment '%v'", pair)
++++ }
++++ if err := os.Setenv(p[0], p[1]); err != nil {
++++ return err
++++ }
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++## nsenter
++++
++++The `nsenter` package registers a special init constructor that is called before the Go runtime has
++++a chance to boot. This provides us the ability to `setns` on existing namespaces and avoid the issues
that the Go runtime has with multiple threads. This constructor is only called if this package is
imported into your Go application and argv[0] is `nsenter`.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build cgo
++++//
++++// formated with indent -linux nsenter.c
++++
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <linux/limits.h>
#include <linux/sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
++++
// Size increment used when slurping /proc/self/cmdline in get_args().
// The explicit int is required: "static const kBufSize" relied on implicit
// int, which was removed in C99 and is rejected by modern compilers.
static const int kBufSize = 256;

// argv[0] prefix that marks a process as an nsenter re-exec.
static const char *kNsEnter = "nsenter";
++++
// get_args reconstructs this process's argc/argv by reading
// /proc/self/cmdline (NUL-delimited arguments). The returned argv points
// into a heap buffer that is never freed; it must stay alive for the
// caller's entire run.
void get_args(int *argc, char ***argv)
{
	// Read argv
	// NOTE(review): open/realloc results are unchecked. If open fails,
	// read(-1, ...) returns -1 immediately, so *argc ends up 0 and
	// (*argv)[0] == NULL — callers must tolerate that. A read failure
	// mid-stream also adds -1 to contents_offset; confirm acceptable.
	int fd = open("/proc/self/cmdline", O_RDONLY);

	// Read the whole commandline.
	ssize_t contents_size = 0;
	ssize_t contents_offset = 0;
	char *contents = NULL;
	ssize_t bytes_read = 0;
	do {
		// Grow the buffer by kBufSize per pass until read reports EOF.
		contents_size += kBufSize;
		contents = (char *)realloc(contents, contents_size);
		bytes_read =
		    read(fd, contents + contents_offset,
			 contents_size - contents_offset);
		contents_offset += bytes_read;
	}
	while (bytes_read > 0);
	close(fd);

	// Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args.
	ssize_t i;
	*argc = 0;
	for (i = 0; i < contents_offset; i++) {
		if (contents[i] == '\0') {
			(*argc)++;
		}
	}
	*argv = (char **)malloc(sizeof(char *) * ((*argc) + 1));
	int idx;
	for (idx = 0; idx < (*argc); idx++) {
		// Each entry points at the next NUL-terminated chunk in contents.
		(*argv)[idx] = contents;
		contents += strlen(contents) + 1;
	}
	(*argv)[*argc] = NULL;
}
++++
// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
// NOTE(review): defining _GNU_SOURCE this late has no effect on headers
// already included above — confirm it is still needed for <sched.h> here.
#define _GNU_SOURCE
#include <sched.h>
#include "syscall.h"
#ifdef SYS_setns
// Minimal wrapper with the same contract as glibc's setns(2).
int setns(int fd, int nstype)
{
	return syscall(SYS_setns, fd, nstype);
}
#endif
#endif
++++
// print_usage writes the expected nsenter command line to stderr.
void print_usage()
{
	fputs("nsenter --nspid <pid> --console <console> -- cmd1 arg1 arg2...\n",
	      stderr);
}
++++
// nsenter is invoked from a C constructor (see the nsenter Go package)
// before the Go runtime boots. If argv[0] starts with "nsenter", it joins
// the namespaces of --nspid and forks so the child actually enters the
// PID namespace; the parent waits and mirrors the child's exit status.
void nsenter()
{
	int argc, c;
	char **argv;
	get_args(&argc, &argv);

	// check argv 0 to ensure that we are supposed to setns
	// we use strncmp to test for a value of "nsenter" but also allow alternate implementations
	// after the setns code path to continue to use the argv 0 to determine actions to be run
	// resulting in the ability to specify "nsenter-mknod", "nsenter-exec", etc...
	if (strncmp(argv[0], kNsEnter, strlen(kNsEnter)) != 0) {
		return;
	}

	static const struct option longopts[] = {
		{"nspid", required_argument, NULL, 'n'},
		{"console", required_argument, NULL, 't'},
		{NULL, 0, NULL, 0}
	};

	pid_t init_pid = -1;
	char *init_pid_str = NULL;
	char *console = NULL;
	while ((c = getopt_long_only(argc, argv, "n:c:", longopts, NULL)) != -1) {
		switch (c) {
		case 'n':
			init_pid_str = optarg;
			break;
		case 't':
			console = optarg;
			break;
		}
	}

	if (init_pid_str == NULL) {
		print_usage();
		exit(1);
	}

	// NOTE(review): errno is not reset to 0 before strtol, so a stale
	// EINVAL/ERANGE left by an earlier call could trigger this check
	// spuriously — confirm and consider "errno = 0;" first.
	init_pid = strtol(init_pid_str, NULL, 10);
	if ((init_pid == 0 && errno == EINVAL) || errno == ERANGE) {
		fprintf(stderr,
			"nsenter: Failed to parse PID from \"%s\" with output \"%d\" and error: \"%s\"\n",
			init_pid_str, init_pid, strerror(errno));
		print_usage();
		exit(1);
	}

	// NOTE(review): hardcoded shift assumes exactly "--nspid <pid> --";
	// with --console present the offsets differ. argc/argv are not used
	// again below, so this is currently harmless, but getopt's optind
	// would be the robust choice.
	argc -= 3;
	argv += 3;

	if (setsid() == -1) {
		fprintf(stderr, "setsid failed. Error: %s\n", strerror(errno));
		exit(1);
	}
	// before we setns we need to dup the console
	int consolefd = -1;
	if (console != NULL) {
		consolefd = open(console, O_RDWR);
		if (consolefd < 0) {
			fprintf(stderr,
				"nsenter: failed to open console %s %s\n",
				console, strerror(errno));
			exit(1);
		}
	}
	// Setns on all supported namespaces.
	char ns_dir[PATH_MAX];
	memset(ns_dir, 0, PATH_MAX);
	snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid);

	char *namespaces[] = { "ipc", "uts", "net", "pid", "mnt" };
	const int num = sizeof(namespaces) / sizeof(char *);
	int i;
	for (i = 0; i < num; i++) {
		char buf[PATH_MAX];
		memset(buf, 0, PATH_MAX);
		snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, namespaces[i]);
		int fd = open(buf, O_RDONLY);
		if (fd == -1) {
			// Ignore nonexistent namespaces.
			if (errno == ENOENT)
				continue;

			fprintf(stderr,
				"nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n",
				buf, namespaces[i], strerror(errno));
			exit(1);
		}
		// Set the namespace.
		if (setns(fd, 0) == -1) {
			fprintf(stderr,
				"nsenter: Failed to setns for \"%s\" with error: \"%s\"\n",
				namespaces[i], strerror(errno));
			exit(1);
		}
		close(fd);
	}

	// We must fork to actually enter the PID namespace.
	int child = fork();
	if (child == 0) {
		// Child: wire stdio to the console (if any), then return so the
		// Go runtime boots inside the joined namespaces.
		if (consolefd != -1) {
			if (dup2(consolefd, STDIN_FILENO) != 0) {
				fprintf(stderr, "nsenter: failed to dup 0 %s\n",
					strerror(errno));
				exit(1);
			}
			if (dup2(consolefd, STDOUT_FILENO) != STDOUT_FILENO) {
				fprintf(stderr, "nsenter: failed to dup 1 %s\n",
					strerror(errno));
				exit(1);
			}
			if (dup2(consolefd, STDERR_FILENO) != STDERR_FILENO) {
				fprintf(stderr, "nsenter: failed to dup 2 %s\n",
					strerror(errno));
				exit(1);
			}
		}
		// Finish executing, let the Go runtime take over.
		return;
	} else {
		// Parent, wait for the child.
		// (waitpid/WIFEXITED/WTERMSIG require <sys/wait.h>.)
		int status = 0;
		if (waitpid(child, &status, 0) == -1) {
			fprintf(stderr,
				"nsenter: Failed to waitpid with error: \"%s\"\n",
				strerror(errno));
			exit(1);
		}
		// Forward the child's exit code or re-send its death signal.
		if (WIFEXITED(status)) {
			exit(WEXITSTATUS(status));
		} else if (WIFSIGNALED(status)) {
			kill(getpid(), WTERMSIG(status));
		}

		exit(1);
	}

	return;
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package nsenter
++++
++++/*
++++__attribute__((constructor)) init() {
++++ nsenter();
++++}
++++*/
++++import "C"
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !linux !cgo
++++
++++package nsenter
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package namespaces
++++
++++import "errors"
++++
type (
	// Namespace describes one Linux namespace option: Key is the
	// libcontainer option name (e.g. "NEWNS"), Value the matching CLONE_*
	// flag, and File the entry name under /proc/<pid>/ns/.
	Namespace struct {
		Key   string `json:"key,omitempty"`
		Value int    `json:"value,omitempty"`
		File  string `json:"file,omitempty"`
	}
	// Namespaces is a list of Namespace pointers; entries may be nil
	// (see Get, which skips them).
	Namespaces []*Namespace
)

// namespaceList is used to convert the libcontainer types
// into the names of the files located in /proc/<pid>/ns/* for
// each namespace
var (
	// populated by the platform-specific init (empty on unsupported platforms)
	namespaceList = Namespaces{}
	// NOTE(review): "Unkown" is a typo, but the identifier is exported
	// API — kept for backward compatibility.
	ErrUnkownNamespace = errors.New("Unknown namespace")
	ErrUnsupported     = errors.New("Unsupported method")
)
++++
++++func (ns *Namespace) String() string {
++++ return ns.Key
++++}
++++
++++func GetNamespace(key string) *Namespace {
++++ for _, ns := range namespaceList {
++++ if ns.Key == key {
++++ cpy := *ns
++++ return &cpy
++++ }
++++ }
++++ return nil
++++}
++++
++++// Contains returns true if the specified Namespace is
++++// in the slice
++++func (n Namespaces) Contains(ns string) bool {
++++ return n.Get(ns) != nil
++++}
++++
++++func (n Namespaces) Get(ns string) *Namespace {
++++ for _, nsp := range n {
++++ if nsp != nil && nsp.Key == ns {
++++ return nsp
++++ }
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package namespaces
++++
++++import (
++++ "syscall"
++++)
++++
++++func init() {
++++ namespaceList = Namespaces{
++++ {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"},
++++ {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"},
++++ {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"},
++++ {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"},
++++ {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"},
++++ {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"},
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package namespaces
++++
++++import (
++++ "testing"
++++)
++++
++++func TestNamespacesContains(t *testing.T) {
++++ ns := Namespaces{
++++ GetNamespace("NEWPID"),
++++ GetNamespace("NEWNS"),
++++ GetNamespace("NEWUTS"),
++++ }
++++
++++ if ns.Contains("NEWNET") {
++++ t.Fatal("namespaces should not contain NEWNET")
++++ }
++++
++++ if !ns.Contains("NEWPID") {
++++ t.Fatal("namespaces should contain NEWPID but does not")
++++ }
++++
++++ withNil := Namespaces{
++++ GetNamespace("UNDEFINED"), // this element will be nil
++++ GetNamespace("NEWPID"),
++++ }
++++
++++ if !withNil.Contains("NEWPID") {
++++ t.Fatal("namespaces should contain NEWPID but does not")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
++++Guillaume J. Charmes <guillaume@docker.com> (@creack)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
// Package netlink provides access to low level Netlink sockets and messages.
++++//
++++// Actual implementations are in:
++++// netlink_linux.go
++++// netlink_darwin.go
++++package netlink
++++
++++import (
++++ "errors"
++++ "net"
++++)
++++
var (
	// ErrWrongSockType: getsockname returned a non-netlink address.
	ErrWrongSockType = errors.New("Wrong socket type")
	// ErrShortResponse: a netlink read was smaller than a message header.
	ErrShortResponse = errors.New("Got short response from netlink")
	ErrInterfaceExists = errors.New("Network interface already exists")
)

// A Route is a subnet associated with the interface to reach it.
type Route struct {
	// NOTE(review): Default appears to mark the default route — confirm
	// against AddRoute/route-listing callers.
	*net.IPNet
	Iface   *net.Interface
	Default bool
}

// An IfAddr defines IP network settings for a given network interface
type IfAddr struct {
	Iface *net.Interface
	IP    net.IP
	IPNet *net.IPNet
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package netlink
++++
++++import (
++++ "encoding/binary"
++++ "fmt"
++++ "io"
++++ "math/rand"
++++ "net"
++++ "os"
++++ "sync/atomic"
++++ "syscall"
++++ "unsafe"
++++)
++++
// Netlink attribute and bridge-ioctl constants not exposed by the
// syscall package.
const (
	IFNAMSIZ          = 16         // kernel limit on interface name length
	DEFAULT_CHANGE    = 0xFFFFFFFF // ifinfomsg change mask: consider all flag bits
	IFLA_INFO_KIND    = 1
	IFLA_INFO_DATA    = 2
	VETH_INFO_PEER    = 1
	IFLA_MACVLAN_MODE = 1
	IFLA_VLAN_ID      = 1
	IFLA_NET_NS_FD    = 28
	IFLA_ADDRESS      = 1
	SIOC_BRADDBR      = 0x89a0 // bridge ioctl: add bridge
	SIOC_BRDELBR      = 0x89a1 // bridge ioctl: delete bridge
	SIOC_BRADDIF      = 0x89a2 // bridge ioctl: add interface to bridge
)

// macvlan operating modes (IFLA_MACVLAN_MODE values).
const (
	MACVLAN_MODE_PRIVATE = 1 << iota
	MACVLAN_MODE_VEPA
	MACVLAN_MODE_BRIDGE
	MACVLAN_MODE_PASSTHRU
)

// nextSeqNr is the global netlink sequence counter, advanced atomically
// by newNetlinkRequest.
var nextSeqNr uint32

// ifreqHwaddr mirrors struct ifreq with the hwaddr union member.
type ifreqHwaddr struct {
	IfrnName   [IFNAMSIZ]byte
	IfruHwaddr syscall.RawSockaddr
}

// ifreqIndex mirrors struct ifreq with the index union member.
type ifreqIndex struct {
	IfrnName  [IFNAMSIZ]byte
	IfruIndex int32
}

// ifreqFlags mirrors struct ifreq with the flags union member.
type ifreqFlags struct {
	IfrnName  [IFNAMSIZ]byte
	Ifruflags uint16
}
++++
++++var native binary.ByteOrder
++++
++++func init() {
++++ var x uint32 = 0x01020304
++++ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
++++ native = binary.BigEndian
++++ } else {
++++ native = binary.LittleEndian
++++ }
++++}
++++
++++func getIpFamily(ip net.IP) int {
++++ if len(ip) <= net.IPv4len {
++++ return syscall.AF_INET
++++ }
++++ if ip.To4() != nil {
++++ return syscall.AF_INET
++++ }
++++ return syscall.AF_INET6
++++}
++++
// NetlinkRequestData is any payload that can report its encoded length
// and serialize itself into netlink wire format.
type NetlinkRequestData interface {
	Len() int
	ToWireFormat() []byte
}

// IfInfomsg wraps syscall.IfInfomsg (struct ifinfomsg) for RTM_*LINK messages.
type IfInfomsg struct {
	syscall.IfInfomsg
}

// newIfInfomsg returns an ifinfomsg header for the given address family.
func newIfInfomsg(family int) *IfInfomsg {
	return &IfInfomsg{
		IfInfomsg: syscall.IfInfomsg{
			Family: uint8(family),
		},
	}
}

// newIfInfomsgChild creates an ifinfomsg and nests it under parent's
// children so it is serialized inside that attribute.
func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
	msg := newIfInfomsg(family)
	parent.children = append(parent.children, msg)
	return msg
}
++++
// ToWireFormat serializes the ifinfomsg in its kernel layout:
// family(1) pad(1) type(2) index(4) flags(4) change(4), host byte order.
func (msg *IfInfomsg) ToWireFormat() []byte {
	length := syscall.SizeofIfInfomsg
	b := make([]byte, length)
	b[0] = msg.Family
	b[1] = 0 // padding byte in struct ifinfomsg
	native.PutUint16(b[2:4], msg.Type)
	native.PutUint32(b[4:8], uint32(msg.Index))
	native.PutUint32(b[8:12], msg.Flags)
	native.PutUint32(b[12:16], msg.Change)
	return b
}

// Len returns the fixed encoded size of struct ifinfomsg.
func (msg *IfInfomsg) Len() int {
	return syscall.SizeofIfInfomsg
}
++++
// IfAddrmsg wraps syscall.IfAddrmsg (struct ifaddrmsg) for RTM_*ADDR messages.
type IfAddrmsg struct {
	syscall.IfAddrmsg
}

// newIfAddrmsg returns an ifaddrmsg header for the given address family.
func newIfAddrmsg(family int) *IfAddrmsg {
	return &IfAddrmsg{
		IfAddrmsg: syscall.IfAddrmsg{
			Family: uint8(family),
		},
	}
}

// ToWireFormat serializes the ifaddrmsg in its kernel layout:
// family(1) prefixlen(1) flags(1) scope(1) index(4), host byte order.
func (msg *IfAddrmsg) ToWireFormat() []byte {
	length := syscall.SizeofIfAddrmsg
	b := make([]byte, length)
	b[0] = msg.Family
	b[1] = msg.Prefixlen
	b[2] = msg.Flags
	b[3] = msg.Scope
	native.PutUint32(b[4:8], msg.Index)
	return b
}

// Len returns the fixed encoded size of struct ifaddrmsg.
func (msg *IfAddrmsg) Len() int {
	return syscall.SizeofIfAddrmsg
}
++++
// RtMsg wraps syscall.RtMsg (struct rtmsg) for RTM_*ROUTE messages.
type RtMsg struct {
	syscall.RtMsg
}

// newRtMsg returns an rtmsg preset for an ordinary unicast route in the
// main table (universe scope, "boot" protocol).
func newRtMsg() *RtMsg {
	return &RtMsg{
		RtMsg: syscall.RtMsg{
			Table:    syscall.RT_TABLE_MAIN,
			Scope:    syscall.RT_SCOPE_UNIVERSE,
			Protocol: syscall.RTPROT_BOOT,
			Type:     syscall.RTN_UNICAST,
		},
	}
}

// ToWireFormat serializes the rtmsg in its kernel layout: eight single
// bytes followed by the 4-byte flags field, host byte order.
func (msg *RtMsg) ToWireFormat() []byte {
	length := syscall.SizeofRtMsg
	b := make([]byte, length)
	b[0] = msg.Family
	b[1] = msg.Dst_len
	b[2] = msg.Src_len
	b[3] = msg.Tos
	b[4] = msg.Table
	b[5] = msg.Protocol
	b[6] = msg.Scope
	b[7] = msg.Type
	native.PutUint32(b[8:12], msg.Flags)
	return b
}

// Len returns the fixed encoded size of struct rtmsg.
func (msg *RtMsg) Len() int {
	return syscall.SizeofRtMsg
}
++++
++++func rtaAlignOf(attrlen int) int {
++++ return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1)
++++}
++++
// RtAttr is a route attribute (struct rtattr) carrying either an opaque
// payload (Data) or nested child attributes (children).
type RtAttr struct {
	syscall.RtAttr
	Data     []byte
	children []NetlinkRequestData
}

// newRtAttr builds a leaf attribute of the given type and payload.
func newRtAttr(attrType int, data []byte) *RtAttr {
	return &RtAttr{
		RtAttr: syscall.RtAttr{
			Type: uint16(attrType),
		},
		children: []NetlinkRequestData{},
		Data:     data,
	}
}

// newRtAttrChild builds an attribute and nests it under parent.
func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
	attr := newRtAttr(attrType, data)
	parent.children = append(parent.children, attr)
	return attr
}
++++
// Len returns the attribute's size including the rtattr header.
// NOTE(review): the nested case is RTA-aligned while the leaf case is not;
// ToWireFormat compensates by allocating rtaAlignOf(length). Confirm all
// callers expect this asymmetry.
func (a *RtAttr) Len() int {
	if len(a.children) == 0 {
		return (syscall.SizeofRtAttr + len(a.Data))
	}

	l := 0
	for _, child := range a.children {
		l += child.Len()
	}
	l += syscall.SizeofRtAttr
	return rtaAlignOf(l + len(a.Data))
}

// ToWireFormat serializes the attribute: 2-byte length, 2-byte type, then
// either the raw payload or the concatenated, aligned children.
func (a *RtAttr) ToWireFormat() []byte {
	length := a.Len()
	buf := make([]byte, rtaAlignOf(length))

	if a.Data != nil {
		copy(buf[4:], a.Data)
	} else {
		// children are packed back-to-back, each padded to alignment
		next := 4
		for _, child := range a.children {
			childBuf := child.ToWireFormat()
			copy(buf[next:], childBuf)
			next += rtaAlignOf(len(childBuf))
		}
	}

	if l := uint16(length); l != 0 {
		native.PutUint16(buf[0:2], l)
	}
	native.PutUint16(buf[2:4], a.Type)
	return buf
}
++++
++++func uint32Attr(t int, n uint32) *RtAttr {
++++ buf := make([]byte, 4)
++++ native.PutUint32(buf, n)
++++ return newRtAttr(t, buf)
++++}
++++
// NetlinkRequest is a complete netlink message: nlmsghdr plus payload parts.
type NetlinkRequest struct {
	syscall.NlMsghdr
	Data []NetlinkRequestData
}

// ToWireFormat serializes the header and all payload parts; the header's
// Len field is extended by the payload sizes at encode time.
func (rr *NetlinkRequest) ToWireFormat() []byte {
	length := rr.Len
	dataBytes := make([][]byte, len(rr.Data))
	for i, data := range rr.Data {
		dataBytes[i] = data.ToWireFormat()
		length += uint32(len(dataBytes[i]))
	}
	b := make([]byte, length)
	native.PutUint32(b[0:4], length)
	native.PutUint16(b[4:6], rr.Type)
	native.PutUint16(b[6:8], rr.Flags)
	native.PutUint32(b[8:12], rr.Seq)
	native.PutUint32(b[12:16], rr.Pid)

	// payload parts start right after the 16-byte nlmsghdr
	next := 16
	for _, data := range dataBytes {
		copy(b[next:], data)
		next += len(data)
	}
	return b
}

// AddData appends a payload part; nil parts are ignored.
func (rr *NetlinkRequest) AddData(data NetlinkRequestData) {
	if data != nil {
		rr.Data = append(rr.Data, data)
	}
}

// newNetlinkRequest builds a request header for the given message type and
// flags; NLM_F_REQUEST is always set and a fresh sequence number assigned.
func newNetlinkRequest(proto, flags int) *NetlinkRequest {
	return &NetlinkRequest{
		NlMsghdr: syscall.NlMsghdr{
			Len:   uint32(syscall.NLMSG_HDRLEN),
			Type:  uint16(proto),
			Flags: syscall.NLM_F_REQUEST | uint16(flags),
			Seq:   atomic.AddUint32(&nextSeqNr, 1),
		},
	}
}
++++
// NetlinkSocket is a raw AF_NETLINK/NETLINK_ROUTE socket plus the local
// address it was bound with.
type NetlinkSocket struct {
	fd  int
	lsa syscall.SockaddrNetlink
}

// getNetlinkSocket opens and binds a NETLINK_ROUTE socket.
// The caller is responsible for calling Close.
func getNetlinkSocket() (*NetlinkSocket, error) {
	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE)
	if err != nil {
		return nil, err
	}
	s := &NetlinkSocket{
		fd: fd,
	}
	s.lsa.Family = syscall.AF_NETLINK
	if err := syscall.Bind(fd, &s.lsa); err != nil {
		// don't leak the fd when bind fails
		syscall.Close(fd)
		return nil, err
	}

	return s, nil
}

// Close releases the underlying socket fd.
func (s *NetlinkSocket) Close() {
	syscall.Close(s.fd)
}

// Send writes the serialized request to the kernel.
func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
	if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil {
		return err
	}
	return nil
}
++++
// Receive reads one datagram (up to a page) and parses it into netlink
// messages.
func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
	rb := make([]byte, syscall.Getpagesize())
	nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
	if err != nil {
		return nil, err
	}
	if nr < syscall.NLMSG_HDRLEN {
		return nil, ErrShortResponse
	}
	rb = rb[:nr]
	return syscall.ParseNetlinkMessage(rb)
}

// GetPid returns the kernel-assigned netlink port id of this socket,
// used to filter replies addressed to us.
func (s *NetlinkSocket) GetPid() (uint32, error) {
	lsa, err := syscall.Getsockname(s.fd)
	if err != nil {
		return 0, err
	}
	switch v := lsa.(type) {
	case *syscall.SockaddrNetlink:
		return v.Pid, nil
	}
	return 0, ErrWrongSockType
}

// CheckMessage validates one reply against the expected seq and pid.
// It returns io.EOF for NLMSG_DONE or a zero NLMSG_ERROR (an ack), and
// the decoded errno for a non-zero NLMSG_ERROR.
func (s *NetlinkSocket) CheckMessage(m syscall.NetlinkMessage, seq, pid uint32) error {
	if m.Header.Seq != seq {
		return fmt.Errorf("netlink: invalid seq %d, expected %d", m.Header.Seq, seq)
	}
	if m.Header.Pid != pid {
		return fmt.Errorf("netlink: wrong pid %d, expected %d", m.Header.Pid, pid)
	}
	if m.Header.Type == syscall.NLMSG_DONE {
		return io.EOF
	}
	if m.Header.Type == syscall.NLMSG_ERROR {
		// first 4 payload bytes hold the negated errno
		e := int32(native.Uint32(m.Data[0:4]))
		if e == 0 {
			// error code 0 is an acknowledgement, not a failure
			return io.EOF
		}
		return syscall.Errno(-e)
	}
	return nil
}
++++
// HandleAck drains replies for the request with sequence number seq until
// the kernel acknowledges it (io.EOF from CheckMessage) or reports an
// error, which is returned.
func (s *NetlinkSocket) HandleAck(seq uint32) error {
	pid, err := s.GetPid()
	if err != nil {
		return err
	}

outer:
	for {
		msgs, err := s.Receive()
		if err != nil {
			return err
		}
		for _, m := range msgs {
			if err := s.CheckMessage(m, seq, pid); err != nil {
				if err == io.EOF {
					// ack received; stop reading
					break outer
				}
				return err
			}
		}
	}

	return nil
}
++++
// zeroTerminated encodes s as a NUL-terminated byte slice, the format
// expected by string-valued netlink attributes such as IFLA_IFNAME.
func zeroTerminated(s string) []byte {
	return append([]byte(s), 0)
}
++++
// nonZeroTerminated encodes s as raw bytes with no trailing NUL, for the
// netlink attributes that take the bare string.
func nonZeroTerminated(s string) []byte {
	b := make([]byte, len(s))
	copy(b, s)
	return b
}
++++
++++// Add a new network link of a specified type.
++++// This is identical to running: ip link add $name type $linkType
++++func NetworkLinkAdd(name string, linkType string) error {
++++ if name == "" || linkType == "" {
++++ return fmt.Errorf("Neither link name nor link type can be empty!")
++++ }
++++
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ wb.AddData(msg)
++++
++++ linkInfo := newRtAttr(syscall.IFLA_LINKINFO, nil)
++++ newRtAttrChild(linkInfo, IFLA_INFO_KIND, nonZeroTerminated(linkType))
++++ wb.AddData(linkInfo)
++++
++++ nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name))
++++ wb.AddData(nameData)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++// Delete a network link.
++++// This is identical to running: ip link del $name
++++func NetworkLinkDel(name string) error {
++++ if name == "" {
++++ return fmt.Errorf("Network link name can not be empty!")
++++ }
++++
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ iface, err := net.InterfaceByName(name)
++++ if err != nil {
++++ return err
++++ }
++++
++++ wb := newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Index = int32(iface.Index)
++++ wb.AddData(msg)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++// Bring up a particular network interface.
++++// This is identical to running: ip link set dev $name up
++++func NetworkLinkUp(iface *net.Interface) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Index = int32(iface.Index)
++++ msg.Flags = syscall.IFF_UP
++++ msg.Change = syscall.IFF_UP
++++ wb.AddData(msg)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++// Bring down a particular network interface.
++++// This is identical to running: ip link set $name down
++++func NetworkLinkDown(iface *net.Interface) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Index = int32(iface.Index)
++++ msg.Flags = 0 & ^syscall.IFF_UP
++++ msg.Change = DEFAULT_CHANGE
++++ wb.AddData(msg)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++// Set link layer address ie. MAC Address.
++++// This is identical to running: ip link set dev $name address $macaddress
++++func NetworkSetMacAddress(iface *net.Interface, macaddr string) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ hwaddr, err := net.ParseMAC(macaddr)
++++ if err != nil {
++++ return err
++++ }
++++
++++ var (
++++ MULTICAST byte = 0x1
++++ LOCALOUI byte = 0x2
++++ )
++++
++++ if hwaddr[0]&0x1 == MULTICAST || hwaddr[0]&0x2 != LOCALOUI {
++++ return fmt.Errorf("Incorrect Local MAC Address specified: %s", macaddr)
++++ }
++++
++++ wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Index = int32(iface.Index)
++++ msg.Change = DEFAULT_CHANGE
++++ wb.AddData(msg)
++++
++++ macdata := make([]byte, 6)
++++ copy(macdata, hwaddr)
++++ data := newRtAttr(IFLA_ADDRESS, macdata)
++++ wb.AddData(data)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++// Set link Maximum Transmission Unit
++++// This is identical to running: ip link set dev $name mtu $MTU
++++// bridge is a bitch here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=292088
++++// https://bugzilla.redhat.com/show_bug.cgi?id=697021
++++// There is a discussion about how to deal with ifcs joining bridge with MTU > 1500
++++// Regular network nterfaces do seem to work though!
++++func NetworkSetMTU(iface *net.Interface, mtu int) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Type = syscall.RTM_SETLINK
++++ msg.Flags = syscall.NLM_F_REQUEST
++++ msg.Index = int32(iface.Index)
++++ msg.Change = DEFAULT_CHANGE
++++ wb.AddData(msg)
++++ wb.AddData(uint32Attr(syscall.IFLA_MTU, uint32(mtu)))
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Type = syscall.RTM_SETLINK
++++ msg.Flags = syscall.NLM_F_REQUEST
++++ msg.Index = int32(iface.Index)
++++ msg.Change = DEFAULT_CHANGE
++++ wb.AddData(msg)
++++ wb.AddData(rtattr)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
// Add an interface to bridge.
// This is identical to running: ip link set $name master $master
func NetworkSetMaster(iface, master *net.Interface) error {
	// IFLA_MASTER carries the bridge's interface index.
	data := uint32Attr(syscall.IFLA_MASTER, uint32(master.Index))
	return networkMasterAction(iface, data)
}
++++
// Remove an interface from the bridge.
// This is identical to running: ip link set $name nomaster
func NetworkSetNoMaster(iface *net.Interface) error {
	// An IFLA_MASTER of 0 detaches the link from its current master.
	data := uint32Attr(syscall.IFLA_MASTER, 0)
	return networkMasterAction(iface, data)
}
++++
++++func networkSetNsAction(iface *net.Interface, rtattr *RtAttr) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Index = int32(iface.Index)
++++ wb.AddData(msg)
++++ wb.AddData(rtattr)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
// Move a particular network interface to a particular network namespace
// specified by PID. This is identical to running: ip link set dev $name netns $pid
func NetworkSetNsPid(iface *net.Interface, nspid int) error {
	data := uint32Attr(syscall.IFLA_NET_NS_PID, uint32(nspid))
	return networkSetNsAction(iface, data)
}
++++
// Move a particular network interface to a particular mounted
// network namespace specified by file descriptor.
// This is identical to running: ip link set dev $name netns $fd
func NetworkSetNsFd(iface *net.Interface, fd int) error {
	data := uint32Attr(IFLA_NET_NS_FD, uint32(fd))
	return networkSetNsAction(iface, data)
}
++++
++++// Rname a particular interface to a different name
++++// !!! Note that you can't rename an active interface. You need to bring it down before renaming it.
++++// This is identical to running: ip link set dev ${oldName} name ${newName}
++++func NetworkChangeName(iface *net.Interface, newName string) error {
++++ if len(newName) >= IFNAMSIZ {
++++ return fmt.Errorf("Interface name %s too long", newName)
++++ }
++++
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ msg.Index = int32(iface.Index)
++++ msg.Change = DEFAULT_CHANGE
++++ wb.AddData(msg)
++++
++++ nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(newName))
++++ wb.AddData(nameData)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
// Add a new VETH pair link on the host
// This is identical to running: ip link add name $name type veth peer name $peername
// Returns ErrInterfaceExists if the kernel reports EEXIST for name1.
func NetworkCreateVethPair(name1, name2 string) error {
	s, err := getNetlinkSocket()
	if err != nil {
		return err
	}
	defer s.Close()

	wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)

	msg := newIfInfomsg(syscall.AF_UNSPEC)
	wb.AddData(msg)

	nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1))
	wb.AddData(nameData)

	// Nested attribute layout, matching what the kernel veth driver parses:
	// IFLA_LINKINFO {
	//   IFLA_INFO_KIND = "veth",
	//   IFLA_INFO_DATA { VETH_INFO_PEER { ifinfomsg, IFLA_IFNAME = name2 } }
	// }
	nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil)
	newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth"))
	nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil)
	nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil)

	// The peer description starts with its own ifinfomsg before its name.
	newIfInfomsgChild(nest3, syscall.AF_UNSPEC)
	newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2))

	wb.AddData(nest1)

	if err := s.Send(wb); err != nil {
		return err
	}

	if err := s.HandleAck(wb.Seq); err != nil {
		// EEXIST: a link with this name already exists.
		if os.IsExist(err) {
			return ErrInterfaceExists
		}

		return err
	}

	return nil
}
++++
++++// Add a new VLAN interface with masterDev as its upper device
++++// This is identical to running:
++++// ip link add name $name link $masterdev type vlan id $id
++++func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
++++
++++ masterDevIfc, err := net.InterfaceByName(masterDev)
++++ if err != nil {
++++ return err
++++ }
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ wb.AddData(msg)
++++
++++ nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil)
++++ newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("vlan"))
++++
++++ nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil)
++++ vlanData := make([]byte, 2)
++++ native.PutUint16(vlanData, vlanId)
++++ newRtAttrChild(nest2, IFLA_VLAN_ID, vlanData)
++++ wb.AddData(nest1)
++++
++++ wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index)))
++++ wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(vlanDev)))
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++// Add MAC VLAN network interface with masterDev as its upper device
++++// This is identical to running:
++++// ip link add name $name link $masterdev type macvlan mode $mode
++++func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ macVlan := map[string]uint32{
++++ "private": MACVLAN_MODE_PRIVATE,
++++ "vepa": MACVLAN_MODE_VEPA,
++++ "bridge": MACVLAN_MODE_BRIDGE,
++++ "passthru": MACVLAN_MODE_PASSTHRU,
++++ }
++++
++++ wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
++++
++++ masterDevIfc, err := net.InterfaceByName(masterDev)
++++ if err != nil {
++++ return err
++++ }
++++
++++ msg := newIfInfomsg(syscall.AF_UNSPEC)
++++ wb.AddData(msg)
++++
++++ nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil)
++++ newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("macvlan"))
++++
++++ nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil)
++++ macVlanData := make([]byte, 4)
++++ native.PutUint32(macVlanData, macVlan[mode])
++++ newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData)
++++ wb.AddData(nest1)
++++
++++ wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index)))
++++ wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(macVlanDev)))
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++ return s.HandleAck(wb.Seq)
++++}
++++
++++func networkLinkIpAction(action, flags int, ifa IfAddr) error {
++++ s, err := getNetlinkSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer s.Close()
++++
++++ family := getIpFamily(ifa.IP)
++++
++++ wb := newNetlinkRequest(action, flags)
++++
++++ msg := newIfAddrmsg(family)
++++ msg.Index = uint32(ifa.Iface.Index)
++++ prefixLen, _ := ifa.IPNet.Mask.Size()
++++ msg.Prefixlen = uint8(prefixLen)
++++ wb.AddData(msg)
++++
++++ var ipData []byte
++++ if family == syscall.AF_INET {
++++ ipData = ifa.IP.To4()
++++ } else {
++++ ipData = ifa.IP.To16()
++++ }
++++
++++ localData := newRtAttr(syscall.IFA_LOCAL, ipData)
++++ wb.AddData(localData)
++++
++++ addrData := newRtAttr(syscall.IFA_ADDRESS, ipData)
++++ wb.AddData(addrData)
++++
++++ if err := s.Send(wb); err != nil {
++++ return err
++++ }
++++
++++ return s.HandleAck(wb.Seq)
++++}
++++
// Delete an IP address from an interface. This is identical to:
// ip addr del $ip/$ipNet dev $iface
func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
	return networkLinkIpAction(
		syscall.RTM_DELADDR,
		syscall.NLM_F_ACK,
		IfAddr{iface, ip, ipNet},
	)
}
++++
// Add an Ip address to an interface. This is identical to:
// ip addr add $ip/$ipNet dev $iface
// NLM_F_EXCL makes the kernel reject an address that already exists.
func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
	return networkLinkIpAction(
		syscall.RTM_NEWADDR,
		syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK,
		IfAddr{iface, ip, ipNet},
	)
}
++++
// Returns an array of IPNet for all the currently routed subnets on ipv4
// This is similar to the first column of "ip route" output
// Only non-cloned IPv4 routes from the main table are returned; routes with
// neither a destination nor default status are skipped.
func NetworkGetRoutes() ([]Route, error) {
	s, err := getNetlinkSocket()
	if err != nil {
		return nil, err
	}
	defer s.Close()

	// RTM_GETROUTE + NLM_F_DUMP requests the entire routing table.
	wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)

	msg := newIfInfomsg(syscall.AF_UNSPEC)
	wb.AddData(msg)

	if err := s.Send(wb); err != nil {
		return nil, err
	}

	pid, err := s.GetPid()
	if err != nil {
		return nil, err
	}

	res := make([]Route, 0)

outer:
	for {
		msgs, err := s.Receive()
		if err != nil {
			return nil, err
		}
		for _, m := range msgs {
			if err := s.CheckMessage(m, wb.Seq, pid); err != nil {
				if err == io.EOF {
					// NLMSG_DONE: end of the dump.
					break outer
				}
				return nil, err
			}
			if m.Header.Type != syscall.RTM_NEWROUTE {
				continue
			}

			var r Route

			// The payload begins with a struct rtmsg.
			msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0]))

			if msg.Flags&syscall.RTM_F_CLONED != 0 {
				// Ignore cloned routes
				continue
			}

			if msg.Table != syscall.RT_TABLE_MAIN {
				// Ignore non-main tables
				continue
			}

			if msg.Family != syscall.AF_INET {
				// Ignore non-ipv4 routes
				continue
			}

			if msg.Dst_len == 0 {
				// Default routes
				r.Default = true
			}

			attrs, err := syscall.ParseNetlinkRouteAttr(&m)
			if err != nil {
				return nil, err
			}
			for _, attr := range attrs {
				switch attr.Attr.Type {
				case syscall.RTA_DST:
					// Destination subnet; the prefix length comes from rtmsg.
					ip := attr.Value
					r.IPNet = &net.IPNet{
						IP:   ip,
						Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)),
					}
				case syscall.RTA_OIF:
					// Output interface index; a failed lookup leaves Iface nil.
					index := int(native.Uint32(attr.Value[0:4]))
					r.Iface, _ = net.InterfaceByIndex(index)
				}
			}
			if r.Default || r.IPNet != nil {
				res = append(res, r)
			}
		}
	}

	return res, nil
}
++++
// Add a new route table entry.
// destination and source are CIDR strings, gateway is a bare IP, and device
// is the output interface name. All supplied addresses must share one IP
// family. At least one of destination, source or gateway must be non-empty.
func AddRoute(destination, source, gateway, device string) error {
	if destination == "" && source == "" && gateway == "" {
		return fmt.Errorf("one of destination, source or gateway must not be blank")
	}

	s, err := getNetlinkSocket()
	if err != nil {
		return err
	}
	defer s.Close()

	wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
	msg := newRtMsg()
	// currentFamily tracks the family of the first address seen so the
	// later addresses can be checked against it; -1 means "none yet".
	currentFamily := -1
	var rtAttrs []*RtAttr

	if destination != "" {
		destIP, destNet, err := net.ParseCIDR(destination)
		if err != nil {
			return fmt.Errorf("destination CIDR %s couldn't be parsed", destination)
		}
		destFamily := getIpFamily(destIP)
		currentFamily = destFamily
		destLen, bits := destNet.Mask.Size()
		// Size() returns 0,0 for a non-canonical (non-contiguous) mask.
		if destLen == 0 && bits == 0 {
			return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination)
		}
		msg.Family = uint8(destFamily)
		msg.Dst_len = uint8(destLen)
		var destData []byte
		if destFamily == syscall.AF_INET {
			destData = destIP.To4()
		} else {
			destData = destIP.To16()
		}
		rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData))
	}

	if source != "" {
		srcIP, srcNet, err := net.ParseCIDR(source)
		if err != nil {
			return fmt.Errorf("source CIDR %s couldn't be parsed", source)
		}
		srcFamily := getIpFamily(srcIP)
		if currentFamily != -1 && currentFamily != srcFamily {
			return fmt.Errorf("source and destination ip were not the same IP family")
		}
		currentFamily = srcFamily
		srcLen, bits := srcNet.Mask.Size()
		if srcLen == 0 && bits == 0 {
			return fmt.Errorf("source CIDR %s generated a non-canonical Mask", source)
		}
		msg.Family = uint8(srcFamily)
		msg.Src_len = uint8(srcLen)
		var srcData []byte
		if srcFamily == syscall.AF_INET {
			srcData = srcIP.To4()
		} else {
			srcData = srcIP.To16()
		}
		rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_SRC, srcData))
	}

	if gateway != "" {
		gwIP := net.ParseIP(gateway)
		if gwIP == nil {
			return fmt.Errorf("gateway IP %s couldn't be parsed", gateway)
		}
		gwFamily := getIpFamily(gwIP)
		if currentFamily != -1 && currentFamily != gwFamily {
			return fmt.Errorf("gateway, source, and destination ip were not the same IP family")
		}
		msg.Family = uint8(gwFamily)
		var gwData []byte
		if gwFamily == syscall.AF_INET {
			gwData = gwIP.To4()
		} else {
			gwData = gwIP.To16()
		}
		rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData))
	}

	// The rtmsg must precede the route attributes in the message body.
	wb.AddData(msg)
	for _, attr := range rtAttrs {
		wb.AddData(attr)
	}

	iface, err := net.InterfaceByName(device)
	if err != nil {
		return err
	}
	// RTA_OIF selects the output interface for the route.
	wb.AddData(uint32Attr(syscall.RTA_OIF, uint32(iface.Index)))

	if err := s.Send(wb); err != nil {
		return err
	}
	return s.HandleAck(wb.Seq)
}
++++
// Add a new default gateway. Identical to:
// ip route add default via $ip
// device is the output interface name passed through to AddRoute.
func AddDefaultGw(ip, device string) error {
	return AddRoute("", "", ip, device)
}
++++
++++// THIS CODE DOES NOT COMMUNICATE WITH KERNEL VIA RTNETLINK INTERFACE
++++// IT IS HERE FOR BACKWARDS COMPATIBILITY WITH OLDER LINUX KERNELS
++++// WHICH SHIP WITH OLDER NOT ENTIRELY FUNCTIONAL VERSION OF NETLINK
++++func getIfSocket() (fd int, err error) {
++++ for _, socket := range []int{
++++ syscall.AF_INET,
++++ syscall.AF_PACKET,
++++ syscall.AF_INET6,
++++ } {
++++ if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil {
++++ break
++++ }
++++ }
++++ if err == nil {
++++ return fd, nil
++++ }
++++ return -1, err
++++}
++++
++++// Create the actual bridge device. This is more backward-compatible than
++++// netlink.NetworkLinkAdd and works on RHEL 6.
++++func CreateBridge(name string, setMacAddr bool) error {
++++ if len(name) >= IFNAMSIZ {
++++ return fmt.Errorf("Interface name %s too long", name)
++++ }
++++
++++ s, err := getIfSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer syscall.Close(s)
++++
++++ nameBytePtr, err := syscall.BytePtrFromString(name)
++++ if err != nil {
++++ return err
++++ }
++++ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
++++ return err
++++ }
++++ if setMacAddr {
++++ return SetMacAddress(name, randMacAddr())
++++ }
++++ return nil
++++}
++++
// Delete the actual bridge device.
func DeleteBridge(name string) error {
	s, err := getIfSocket()
	if err != nil {
		return err
	}
	defer syscall.Close(s)

	nameBytePtr, err := syscall.BytePtrFromString(name)
	if err != nil {
		return err
	}

	// First clear the interface flags via SIOCSIFFLAGS (ifr's flags field
	// is left zero) — presumably to take the bridge down before deletion;
	// NOTE(review): confirm SIOC_BRDELBR actually requires a downed bridge.
	var ifr ifreqFlags
	copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name))
	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s),
		syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 {
		return err
	}

	// Then remove the bridge itself.
	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s),
		SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
		return err
	}
	return nil
}
++++
++++// Add a slave to abridge device. This is more backward-compatible than
++++// netlink.NetworkSetMaster and works on RHEL 6.
++++func AddToBridge(iface, master *net.Interface) error {
++++ if len(master.Name) >= IFNAMSIZ {
++++ return fmt.Errorf("Interface name %s too long", master.Name)
++++ }
++++
++++ s, err := getIfSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer syscall.Close(s)
++++
++++ ifr := ifreqIndex{}
++++ copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name)
++++ ifr.IfruIndex = int32(iface.Index)
++++
++++ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDIF, uintptr(unsafe.Pointer(&ifr))); err != 0 {
++++ return err
++++ }
++++
++++ return nil
++++}
++++
++++func randMacAddr() string {
++++ hw := make(net.HardwareAddr, 6)
++++ for i := 0; i < 6; i++ {
++++ hw[i] = byte(rand.Intn(255))
++++ }
++++ hw[0] &^= 0x1 // clear multicast bit
++++ hw[0] |= 0x2 // set local assignment bit (IEEE802)
++++ return hw.String()
++++}
++++
++++func SetMacAddress(name, addr string) error {
++++ if len(name) >= IFNAMSIZ {
++++ return fmt.Errorf("Interface name %s too long", name)
++++ }
++++
++++ hw, err := net.ParseMAC(addr)
++++ if err != nil {
++++ return err
++++ }
++++
++++ s, err := getIfSocket()
++++ if err != nil {
++++ return err
++++ }
++++ defer syscall.Close(s)
++++
++++ ifr := ifreqHwaddr{}
++++ ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER
++++ copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name)
++++
++++ for i := 0; i < 6; i++ {
++++ ifr.IfruHwaddr.Data[i] = ifrDataByte(hw[i])
++++ }
++++
++++ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 {
++++ return err
++++ }
++++ return nil
++++}
++++
// ChangeName renames iface to newName via the SIOCSIFNAME ioctl (the
// non-rtnetlink counterpart of NetworkChangeName).
func ChangeName(iface *net.Interface, newName string) error {
	if len(newName) >= IFNAMSIZ {
		return fmt.Errorf("Interface name %s too long", newName)
	}

	fd, err := getIfSocket()
	if err != nil {
		return err
	}
	defer syscall.Close(fd)

	// SIOCSIFNAME takes an ifreq-like buffer: the current name in the
	// first IFNAMSIZ bytes, the new name in the next IFNAMSIZ bytes.
	data := [IFNAMSIZ * 2]byte{}
	// the "-1"s here are very important for ensuring we get proper null
	// termination of our new C strings
	copy(data[:IFNAMSIZ-1], iface.Name)
	copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName)

	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 {
		return errno
	}
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package netlink
++++
// ifrDataByte converts a raw byte for storage in struct ifreq's sa_data
// field. This variant returns uint8; the sibling file guarded by
// "+build !arm" returns int8, so this file appears to be the ARM build.
// NOTE(review): no "// +build arm" constraint is visible above this
// package clause in this view — confirm it exists, otherwise both
// definitions of ifrDataByte would compile on non-ARM platforms.
func ifrDataByte(b byte) uint8 {
	return uint8(b)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !arm
++++
++++package netlink
++++
// ifrDataByte converts a raw byte for storage in struct ifreq's sa_data
// field, which is declared with a signed element type on non-ARM builds
// (this file is guarded by "+build !arm"; the ARM variant returns uint8).
func ifrDataByte(b byte) int8 {
	return int8(b)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package netlink
++++
++++import (
++++ "net"
++++ "strings"
++++ "syscall"
++++ "testing"
++++)
++++
// testLink describes an interface created during a test: its name and the
// netlink link type used to create it (e.g. "dummy", "bridge").
type testLink struct {
	name     string // interface name, e.g. "tstEth"
	linkType string // link type passed to NetworkLinkAdd
}
++++
// addLink creates a link of the given type, failing the test on error.
func addLink(t *testing.T, name string, linkType string) {
	if err := NetworkLinkAdd(name, linkType); err != nil {
		t.Fatalf("Unable to create %s link: %s", name, err)
	}
}
++++
// readLink looks up an interface by name via the stdlib, failing the test
// if it does not exist.
func readLink(t *testing.T, name string) *net.Interface {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		t.Fatalf("Could not find %s interface: %s", name, err)
	}

	return iface
}
++++
// deleteLink removes a link by name, failing the test on error.
func deleteLink(t *testing.T, name string) {
	if err := NetworkLinkDel(name); err != nil {
		t.Fatalf("Unable to delete %s link: %s", name, err)
	}
}
++++
// upLink brings the named interface up, failing the test on error.
func upLink(t *testing.T, name string) {
	iface := readLink(t, name)
	if err := NetworkLinkUp(iface); err != nil {
		t.Fatalf("Could not bring UP %#v interface: %s", iface, err)
	}
}
++++
// downLink brings the named interface down, failing the test on error.
func downLink(t *testing.T, name string) {
	iface := readLink(t, name)
	if err := NetworkLinkDown(iface); err != nil {
		t.Fatalf("Could not bring DOWN %#v interface: %s", iface, err)
	}
}
++++
// ipAssigned reports whether ip appears among iface's assigned addresses.
// Address lookup errors are treated as "not assigned".
func ipAssigned(iface *net.Interface, ip net.IP) bool {
	addrs, _ := iface.Addrs()

	want := ip.String()
	for _, addr := range addrs {
		// Addresses come back in "ip/prefix" form; compare the ip part.
		if strings.SplitN(addr.String(), "/", 2)[0] == want {
			return true
		}
	}

	return false
}
++++
// TestNetworkLinkAddDel creates and deletes a dummy and a bridge link.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestNetworkLinkAddDel(t *testing.T) {
	if testing.Short() {
		return
	}

	testLinks := []testLink{
		{"tstEth", "dummy"},
		{"tstBr", "bridge"},
	}

	for _, tl := range testLinks {
		addLink(t, tl.name, tl.linkType)
		defer deleteLink(t, tl.name)
		readLink(t, tl.name)
	}
}
++++
++++func TestNetworkLinkUpDown(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ tl := testLink{name: "tstEth", linkType: "dummy"}
++++
++++ addLink(t, tl.name, tl.linkType)
++++ defer deleteLink(t, tl.name)
++++
++++ upLink(t, tl.name)
++++ ifcAfterUp := readLink(t, tl.name)
++++
++++ if (ifcAfterUp.Flags & syscall.IFF_UP) != syscall.IFF_UP {
++++ t.Fatalf("Could not bring UP %#v initerface", tl)
++++ }
++++
++++ downLink(t, tl.name)
++++ ifcAfterDown := readLink(t, tl.name)
++++
++++ if (ifcAfterDown.Flags & syscall.IFF_UP) == syscall.IFF_UP {
++++ t.Fatalf("Could not bring DOWN %#v initerface", tl)
++++ }
++++}
++++
++++func TestNetworkSetMacAddress(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ tl := testLink{name: "tstEth", linkType: "dummy"}
++++ macaddr := "22:ce:e0:99:63:6f"
++++
++++ addLink(t, tl.name, tl.linkType)
++++ defer deleteLink(t, tl.name)
++++
++++ ifcBeforeSet := readLink(t, tl.name)
++++
++++ if err := NetworkSetMacAddress(ifcBeforeSet, macaddr); err != nil {
++++ t.Fatalf("Could not set %s MAC address on %#v interface: err", macaddr, tl, err)
++++ }
++++
++++ ifcAfterSet := readLink(t, tl.name)
++++
++++ if ifcAfterSet.HardwareAddr.String() != macaddr {
++++ t.Fatalf("Could not set %s MAC address on %#v interface", macaddr, tl)
++++ }
++++}
++++
++++func TestNetworkSetMTU(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ tl := testLink{name: "tstEth", linkType: "dummy"}
++++ mtu := 1400
++++
++++ addLink(t, tl.name, tl.linkType)
++++ defer deleteLink(t, tl.name)
++++
++++ ifcBeforeSet := readLink(t, tl.name)
++++
++++ if err := NetworkSetMTU(ifcBeforeSet, mtu); err != nil {
++++ t.Fatalf("Could not set %d MTU on %#v interface: err", mtu, tl, err)
++++ }
++++
++++ ifcAfterSet := readLink(t, tl.name)
++++
++++ if ifcAfterSet.MTU != mtu {
++++ t.Fatalf("Could not set %d MTU on %#v interface", mtu, tl)
++++ }
++++}
++++
// TestNetworkSetMasterNoMaster attaches a dummy link to a bridge and then
// detaches it again.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestNetworkSetMasterNoMaster(t *testing.T) {
	if testing.Short() {
		return
	}

	master := testLink{"tstBr", "bridge"}
	slave := testLink{"tstEth", "dummy"}
	testLinks := []testLink{master, slave}

	for _, tl := range testLinks {
		addLink(t, tl.name, tl.linkType)
		defer deleteLink(t, tl.name)
		upLink(t, tl.name)
	}

	masterIfc := readLink(t, master.name)
	slaveIfc := readLink(t, slave.name)
	if err := NetworkSetMaster(slaveIfc, masterIfc); err != nil {
		t.Fatalf("Could not set %#v to be the master of %#v: %s", master, slave, err)
	}

	// Trying to figure out a way to test which will not break on RHEL6.
	// We could check for existence of /sys/class/net/tstEth/upper_tstBr
	// which should point to the ../tstBr which is the UPPER device i.e. network bridge

	if err := NetworkSetNoMaster(slaveIfc); err != nil {
		t.Fatalf("Could not UNset %#v master of %#v: %s", master, slave, err)
	}
}
++++
// TestNetworkChangeName renames a dummy link and verifies the new name
// resolves. The link is still down here, which NetworkChangeName requires.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestNetworkChangeName(t *testing.T) {
	if testing.Short() {
		return
	}

	tl := testLink{"tstEth", "dummy"}
	newName := "newTst"

	addLink(t, tl.name, tl.linkType)

	linkIfc := readLink(t, tl.name)
	if err := NetworkChangeName(linkIfc, newName); err != nil {
		// Clean up under the old name since the rename failed.
		deleteLink(t, tl.name)
		t.Fatalf("Could not change %#v interface name to %s: %s", tl, newName, err)
	}

	readLink(t, newName)
	deleteLink(t, newName)
}
++++
// TestNetworkLinkAddVlan creates a VLAN on top of a dummy master link and
// verifies the VLAN interface exists. Deleting the master also removes
// the VLAN, so only the master is cleaned up explicitly.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestNetworkLinkAddVlan(t *testing.T) {
	if testing.Short() {
		return
	}

	tl := struct {
		name string
		id   uint16
	}{
		name: "tstVlan",
		id:   32,
	}
	masterLink := testLink{"tstEth", "dummy"}

	addLink(t, masterLink.name, masterLink.linkType)
	defer deleteLink(t, masterLink.name)

	if err := NetworkLinkAddVlan(masterLink.name, tl.name, tl.id); err != nil {
		t.Fatalf("Unable to create %#v VLAN interface: %s", tl, err)
	}

	readLink(t, tl.name)
}
++++
// TestNetworkLinkAddMacVlan creates a private-mode macvlan on top of a
// dummy master link and verifies it exists. Deleting the master also
// removes the macvlan, so only the master is cleaned up explicitly.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestNetworkLinkAddMacVlan(t *testing.T) {
	if testing.Short() {
		return
	}

	tl := struct {
		name string
		mode string
	}{
		name: "tstVlan",
		mode: "private",
	}
	masterLink := testLink{"tstEth", "dummy"}

	addLink(t, masterLink.name, masterLink.linkType)
	defer deleteLink(t, masterLink.name)

	if err := NetworkLinkAddMacVlan(masterLink.name, tl.name, tl.mode); err != nil {
		t.Fatalf("Unable to create %#v MAC VLAN interface: %s", tl, err)
	}

	readLink(t, tl.name)
}
++++
// TestAddDelNetworkIp adds a /32 address to the loopback interface,
// verifies it is assigned, removes it, and verifies it is gone.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestAddDelNetworkIp(t *testing.T) {
	if testing.Short() {
		return
	}

	ifaceName := "lo"
	ip := net.ParseIP("127.0.1.1")
	mask := net.IPv4Mask(255, 255, 255, 255)
	ipNet := &net.IPNet{IP: ip, Mask: mask}

	iface, err := net.InterfaceByName(ifaceName)
	if err != nil {
		t.Skip("No 'lo' interface; skipping tests")
	}

	if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil {
		t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err)
	}

	if !ipAssigned(iface, ip) {
		t.Fatalf("Could not locate address '%s' in lo address list.", ip.String())
	}

	if err := NetworkLinkDelIp(iface, ip, ipNet); err != nil {
		t.Fatalf("Could not delete IP address %s from interface %#v: %s", ip.String(), iface, err)
	}

	if ipAssigned(iface, ip) {
		t.Fatalf("Located address '%s' in lo address list after removal.", ip.String())
	}
}
++++
// TestCreateVethPair creates a veth pair and verifies both ends exist.
// Deleting one end of a veth pair removes the peer as well, so only
// name1 is cleaned up.
// Requires privileges to talk rtnetlink; skipped with -short.
func TestCreateVethPair(t *testing.T) {
	if testing.Short() {
		return
	}

	var (
		name1 = "veth1"
		name2 = "veth2"
	)

	if err := NetworkCreateVethPair(name1, name2); err != nil {
		t.Fatalf("Could not create veth pair %s %s: %s", name1, name2, err)
	}
	defer NetworkLinkDel(name1)

	readLink(t, name1)
	readLink(t, name2)
}
++++
++++//
++++// netlink package tests which do not use RTNETLINK
++++//
++++func TestCreateBridgeWithMac(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ name := "testbridge"
++++
++++ if err := CreateBridge(name, true); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if _, err := net.InterfaceByName(name); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ // cleanup and tests
++++
++++ if err := DeleteBridge(name); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if _, err := net.InterfaceByName(name); err == nil {
++++ t.Fatalf("expected error getting interface because %s bridge was deleted", name)
++++ }
++++}
++++
++++func TestSetMacAddress(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ name := "testmac"
++++ mac := randMacAddr()
++++
++++ if err := NetworkLinkAdd(name, "bridge"); err != nil {
++++ t.Fatal(err)
++++ }
++++ defer NetworkLinkDel(name)
++++
++++ if err := SetMacAddress(name, mac); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ iface, err := net.InterfaceByName(name)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if iface.HardwareAddr.String() != mac {
++++ t.Fatalf("mac address %q does not match %q", iface.HardwareAddr, mac)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !linux
++++
++++package netlink
++++
++++import (
++++ "errors"
++++ "net"
++++)
++++
var (
	// ErrNotImplemented is returned by every stub in this file; the real
	// implementations are linux-only (this file carries a !linux build tag).
	ErrNotImplemented = errors.New("not implemented")
)

// NetworkGetRoutes is not implemented on this platform.
func NetworkGetRoutes() ([]Route, error) {
	return nil, ErrNotImplemented
}

// NetworkLinkAdd is not implemented on this platform.
func NetworkLinkAdd(name string, linkType string) error {
	return ErrNotImplemented
}

// NetworkLinkDel is not implemented on this platform.
func NetworkLinkDel(name string) error {
	return ErrNotImplemented
}

// NetworkLinkUp is not implemented on this platform.
func NetworkLinkUp(iface *net.Interface) error {
	return ErrNotImplemented
}

// NetworkLinkAddIp is not implemented on this platform.
func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
	return ErrNotImplemented
}

// NetworkLinkDelIp is not implemented on this platform.
func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
	return ErrNotImplemented
}

// AddRoute is not implemented on this platform.
func AddRoute(destination, source, gateway, device string) error {
	return ErrNotImplemented
}

// AddDefaultGw is not implemented on this platform.
func AddDefaultGw(ip, device string) error {
	return ErrNotImplemented
}

// NetworkSetMTU is not implemented on this platform.
func NetworkSetMTU(iface *net.Interface, mtu int) error {
	return ErrNotImplemented
}

// NetworkCreateVethPair is not implemented on this platform.
func NetworkCreateVethPair(name1, name2 string) error {
	return ErrNotImplemented
}

// NetworkChangeName is not implemented on this platform.
func NetworkChangeName(iface *net.Interface, newName string) error {
	return ErrNotImplemented
}

// NetworkSetNsFd is not implemented on this platform.
func NetworkSetNsFd(iface *net.Interface, fd int) error {
	return ErrNotImplemented
}

// NetworkSetNsPid is not implemented on this platform.
func NetworkSetNsPid(iface *net.Interface, nspid int) error {
	return ErrNotImplemented
}

// NetworkSetMaster is not implemented on this platform.
func NetworkSetMaster(iface, master *net.Interface) error {
	return ErrNotImplemented
}

// NetworkLinkDown is not implemented on this platform.
func NetworkLinkDown(iface *net.Interface) error {
	return ErrNotImplemented
}

// CreateBridge is not implemented on this platform.
func CreateBridge(name string, setMacAddr bool) error {
	return ErrNotImplemented
}

// DeleteBridge is not implemented on this platform.
func DeleteBridge(name string) error {
	return ErrNotImplemented
}

// AddToBridge is not implemented on this platform.
func AddToBridge(iface, master *net.Interface) error {
	return ErrNotImplemented
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package network
++++
++++import (
++++ "fmt"
++++)
++++
++++// Loopback is a network strategy that provides a basic loopback device
++++type Loopback struct {
++++}
++++
++++func (l *Loopback) Create(n *Network, nspid int, networkState *NetworkState) error {
++++ return nil
++++}
++++
++++func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error {
++++ // Do not set the MTU on the loopback interface - use the default.
++++ if err := InterfaceUp("lo"); err != nil {
++++ return fmt.Errorf("lo up %s", err)
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package network
++++
++++import (
++++ "fmt"
++++ "os"
++++ "syscall"
++++
++++ "github.com/docker/libcontainer/system"
++++)
++++
// crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace

// NetNS is a network strategy that joins a pre-existing network
// namespace identified by a filesystem path (Network.NsPath).
type NetNS struct {
}

// Create records the namespace path in the runtime NetworkState so that
// Initialize can join it later; no host-side setup is performed.
func (v *NetNS) Create(n *Network, nspid int, networkState *NetworkState) error {
	networkState.NsPath = n.NsPath
	return nil
}
++++
++++func (v *NetNS) Initialize(config *Network, networkState *NetworkState) error {
++++ if networkState.NsPath == "" {
++++ return fmt.Errorf("nspath does is not specified in NetworkState")
++++ }
++++
++++ f, err := os.OpenFile(networkState.NsPath, os.O_RDONLY, 0)
++++ if err != nil {
++++ return fmt.Errorf("failed get network namespace fd: %v", err)
++++ }
++++
++++ if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil {
++++ f.Close()
++++ return fmt.Errorf("failed to setns current network namespace: %v", err)
++++ }
++++
++++ f.Close()
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package network
++++
++++import (
++++ "net"
++++
++++ "github.com/docker/libcontainer/netlink"
++++)
++++
// InterfaceUp looks up the named link and brings it up.
func InterfaceUp(name string) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	return netlink.NetworkLinkUp(iface)
}

// InterfaceDown looks up the named link and brings it down.
func InterfaceDown(name string) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	return netlink.NetworkLinkDown(iface)
}

// ChangeInterfaceName renames the link currently named old to newName.
func ChangeInterfaceName(old, newName string) error {
	iface, err := net.InterfaceByName(old)
	if err != nil {
		return err
	}
	return netlink.NetworkChangeName(iface, newName)
}

// CreateVethPair creates a veth pair with the two given interface names.
func CreateVethPair(name1, name2 string) error {
	return netlink.NetworkCreateVethPair(name1, name2)
}
++++
// SetInterfaceInNamespacePid moves the named link into the network
// namespace of the process with pid nsPid.
func SetInterfaceInNamespacePid(name string, nsPid int) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	return netlink.NetworkSetNsPid(iface, nsPid)
}

// SetInterfaceInNamespaceFd moves the named link into the network
// namespace referenced by the open file descriptor fd.
func SetInterfaceInNamespaceFd(name string, fd uintptr) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	return netlink.NetworkSetNsFd(iface, int(fd))
}

// SetInterfaceMaster attaches the named link to the named bridge.
func SetInterfaceMaster(name, master string) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	masterIface, err := net.InterfaceByName(master)
	if err != nil {
		return err
	}
	return netlink.AddToBridge(iface, masterIface)
}

// SetDefaultGateway installs ip as the default gateway via ifaceName.
func SetDefaultGateway(ip, ifaceName string) error {
	return netlink.AddDefaultGw(ip, ifaceName)
}

// SetInterfaceMac sets the MAC address of the named link to macaddr.
func SetInterfaceMac(name string, macaddr string) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	return netlink.NetworkSetMacAddress(iface, macaddr)
}

// SetInterfaceIp assigns rawIp (CIDR notation, e.g. "10.0.0.2/24") to
// the named link.
func SetInterfaceIp(name string, rawIp string) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	ip, ipNet, err := net.ParseCIDR(rawIp)
	if err != nil {
		return err
	}
	return netlink.NetworkLinkAddIp(iface, ip, ipNet)
}

// SetMtu sets the MTU of the named link.
func SetMtu(name string, mtu int) error {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return err
	}
	return netlink.NetworkSetMTU(iface, mtu)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package network
++++
++++import (
++++ "io/ioutil"
++++ "os"
++++ "path/filepath"
++++ "strconv"
++++ "strings"
++++)
++++
// NetworkStats holds cumulative network interface counters, reported
// from the container's point of view (GetStats swaps the host veth's
// rx/tx counters accordingly).
type NetworkStats struct {
	RxBytes   uint64 `json:"rx_bytes"`
	RxPackets uint64 `json:"rx_packets"`
	RxErrors  uint64 `json:"rx_errors"`
	RxDropped uint64 `json:"rx_dropped"`
	TxBytes   uint64 `json:"tx_bytes"`
	TxPackets uint64 `json:"tx_packets"`
	TxErrors  uint64 `json:"tx_errors"`
	TxDropped uint64 `json:"tx_dropped"`
}
++++
++++// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
++++func GetStats(networkState *NetworkState) (*NetworkStats, error) {
++++ // This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer.
++++ if networkState.VethHost == "" {
++++ return &NetworkStats{}, nil
++++ }
++++ data, err := readSysfsNetworkStats(networkState.VethHost)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
++++ return &NetworkStats{
++++ RxBytes: data["tx_bytes"],
++++ RxPackets: data["tx_packets"],
++++ RxErrors: data["tx_errors"],
++++ RxDropped: data["tx_dropped"],
++++ TxBytes: data["rx_bytes"],
++++ TxPackets: data["rx_packets"],
++++ TxErrors: data["rx_errors"],
++++ TxDropped: data["rx_dropped"],
++++ }, nil
++++}
++++
// Reads all the statistics available under /sys/class/net/<EthInterface>/statistics as a map with file name as key and data as integers.
func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) {
	out := make(map[string]uint64)

	fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/")
	err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error {
		// Propagate errors encountered by Walk itself (previously the
		// callback's error parameter was silently discarded).
		if err != nil {
			return err
		}
		// skip fullPath itself and any (unexpected) subdirectories,
		// which would otherwise fail ReadFile below.
		if path == fullPath || info.IsDir() {
			return nil
		}
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
		if err != nil {
			return err
		}
		out[filepath.Base(path)] = value
		return nil
	})
	return out, err
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package network
++++
++++import (
++++ "errors"
++++)
++++
var (
	// ErrNotValidStrategyType is returned by GetStrategy when the
	// requested strategy name is not registered.
	ErrNotValidStrategyType = errors.New("not a valid network strategy type")
)

// strategies maps a strategy name to its singleton implementation.
var strategies = map[string]NetworkStrategy{
	"veth":     &Veth{},
	"loopback": &Loopback{},
	"netns":    &NetNS{},
}
++++
// NetworkStrategy represents a specific network configuration for
// a container's networking stack
type NetworkStrategy interface {
	// Create performs host-side setup for the network (given the
	// container's init pid) and records results in NetworkState.
	Create(*Network, int, *NetworkState) error
	// Initialize configures the network from inside the container's
	// namespace using the recorded NetworkState.
	Initialize(*Network, *NetworkState) error
}
++++
++++// GetStrategy returns the specific network strategy for the
++++// provided type. If no strategy is registered for the type an
++++// ErrNotValidStrategyType is returned.
++++func GetStrategy(tpe string) (NetworkStrategy, error) {
++++ s, exists := strategies[tpe]
++++ if !exists {
++++ return nil, ErrNotValidStrategyType
++++ }
++++ return s, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package network
++++
// Network defines configuration for a container's networking stack
//
// The network configuration can be omitted from a container causing the
// container to be setup with the host's networking stack
type Network struct {
	// Type sets the networks type, commonly veth and loopback
	Type string `json:"type,omitempty"`

	// Path to network namespace (used by the netns strategy)
	NsPath string `json:"ns_path,omitempty"`

	// The bridge to use.
	Bridge string `json:"bridge,omitempty"`

	// Prefix for the veth interfaces.
	VethPrefix string `json:"veth_prefix,omitempty"`

	// MacAddress contains the MAC address to set on the network interface
	MacAddress string `json:"mac_address,omitempty"`

	// Address contains the IPv4 and mask to set on the network interface
	Address string `json:"address,omitempty"`

	// IPv6Address contains the IPv6 and mask to set on the network interface
	IPv6Address string `json:"ipv6_address,omitempty"`

	// Gateway sets the gateway address that is used as the default for the interface
	Gateway string `json:"gateway,omitempty"`

	// IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
	IPv6Gateway string `json:"ipv6_gateway,omitempty"`

	// Mtu sets the mtu value for the interface and will be mirrored on both the host and
	// container's interfaces if a pair is created, specifically in the case of type veth
	// Note: This does not apply to loopback interfaces.
	Mtu int `json:"mtu,omitempty"`
}
++++
// Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers
// Do not depend on it outside of libcontainer.
type NetworkState struct {
	// The name of the veth interface on the Host.
	VethHost string `json:"veth_host,omitempty"`
	// The name of the veth interface created inside the container for the child.
	VethChild string `json:"veth_child,omitempty"`
	// Net namespace path (recorded by the netns strategy).
	NsPath string `json:"ns_path,omitempty"`
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package network
++++
++++import (
++++ "fmt"
++++
++++ "github.com/docker/libcontainer/netlink"
++++ "github.com/docker/libcontainer/utils"
++++)
++++
// Veth is a network strategy that uses a bridge and creates
// a veth pair, one that stays outside on the host and the other
// is placed inside the container's namespace
type Veth struct {
}

// defaultDevice is the name the child end of the veth pair is renamed
// to inside the container.
const defaultDevice = "eth0"
++++
++++func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error {
++++ var (
++++ bridge = n.Bridge
++++ prefix = n.VethPrefix
++++ )
++++ if bridge == "" {
++++ return fmt.Errorf("bridge is not specified")
++++ }
++++ if prefix == "" {
++++ return fmt.Errorf("veth prefix is not specified")
++++ }
++++ name1, name2, err := createVethPair(prefix)
++++ if err != nil {
++++ return err
++++ }
++++ if err := SetInterfaceMaster(name1, bridge); err != nil {
++++ return err
++++ }
++++ if err := SetMtu(name1, n.Mtu); err != nil {
++++ return err
++++ }
++++ if err := InterfaceUp(name1); err != nil {
++++ return err
++++ }
++++ if err := SetInterfaceInNamespacePid(name2, nspid); err != nil {
++++ return err
++++ }
++++ networkState.VethHost = name1
++++ networkState.VethChild = name2
++++
++++ return nil
++++}
++++
// Initialize configures the child end of the veth pair from inside the
// container's namespace: it renames it to eth0, assigns MAC/IP/IPv6
// addresses, sets the MTU, brings it up, and installs the default
// gateways. Order matters: the interface must be down to be renamed,
// and addresses/gateways are set around bringing it up.
func (v *Veth) Initialize(config *Network, networkState *NetworkState) error {
	var vethChild = networkState.VethChild
	if vethChild == "" {
		return fmt.Errorf("vethChild is not specified")
	}
	// The link must be down before it can be renamed to eth0.
	if err := InterfaceDown(vethChild); err != nil {
		return fmt.Errorf("interface down %s %s", vethChild, err)
	}
	if err := ChangeInterfaceName(vethChild, defaultDevice); err != nil {
		return fmt.Errorf("change %s to %s %s", vethChild, defaultDevice, err)
	}
	if config.MacAddress != "" {
		if err := SetInterfaceMac(defaultDevice, config.MacAddress); err != nil {
			return fmt.Errorf("set %s mac %s", defaultDevice, err)
		}
	}
	// The IPv4 address is mandatory; IPv6 is only applied when configured.
	if err := SetInterfaceIp(defaultDevice, config.Address); err != nil {
		return fmt.Errorf("set %s ip %s", defaultDevice, err)
	}
	if config.IPv6Address != "" {
		if err := SetInterfaceIp(defaultDevice, config.IPv6Address); err != nil {
			return fmt.Errorf("set %s ipv6 %s", defaultDevice, err)
		}
	}

	if err := SetMtu(defaultDevice, config.Mtu); err != nil {
		return fmt.Errorf("set %s mtu to %d %s", defaultDevice, config.Mtu, err)
	}
	if err := InterfaceUp(defaultDevice); err != nil {
		return fmt.Errorf("%s up %s", defaultDevice, err)
	}
	if config.Gateway != "" {
		if err := SetDefaultGateway(config.Gateway, defaultDevice); err != nil {
			return fmt.Errorf("set gateway to %s on device %s failed with %s", config.Gateway, defaultDevice, err)
		}
	}
	if config.IPv6Gateway != "" {
		if err := SetDefaultGateway(config.IPv6Gateway, defaultDevice); err != nil {
			return fmt.Errorf("set gateway for ipv6 to %s on device %s failed with %s", config.IPv6Gateway, defaultDevice, err)
		}
	}
	return nil
}
++++
++++// createVethPair will automatically generage two random names for
++++// the veth pair and ensure that they have been created
++++func createVethPair(prefix string) (name1 string, name2 string, err error) {
++++ for i := 0; i < 10; i++ {
++++ if name1, err = utils.GenerateRandomName(prefix, 7); err != nil {
++++ return
++++ }
++++
++++ if name2, err = utils.GenerateRandomName(prefix, 7); err != nil {
++++ return
++++ }
++++
++++ if err = CreateVethPair(name1, name2); err != nil {
++++ if err == netlink.ErrInterfaceExists {
++++ continue
++++ }
++++
++++ return
++++ }
++++
++++ break
++++ }
++++
++++ return
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package network
++++
++++import (
++++ "testing"
++++
++++ "github.com/docker/libcontainer/netlink"
++++)
++++
++++func TestGenerateVethNames(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ prefix := "veth"
++++
++++ name1, name2, err := createVethPair(prefix)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if name1 == "" {
++++ t.Fatal("name1 should not be empty")
++++ }
++++
++++ if name2 == "" {
++++ t.Fatal("name2 should not be empty")
++++ }
++++}
++++
++++func TestCreateDuplicateVethPair(t *testing.T) {
++++ if testing.Short() {
++++ return
++++ }
++++
++++ prefix := "veth"
++++
++++ name1, name2, err := createVethPair(prefix)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ // retry to create the name interfaces and make sure that we get the correct error
++++ err = CreateVethPair(name1, name2)
++++ if err == nil {
++++ t.Fatal("expected error to not be nil with duplicate interface")
++++ }
++++
++++ if err != netlink.ErrInterfaceExists {
++++ t.Fatalf("expected error to be ErrInterfaceExists but received %q", err)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "encoding/json"
++++ "fmt"
++++ "log"
++++
++++ "github.com/codegangsta/cli"
++++)
++++
// configCommand prints the loaded container configuration as JSON.
var configCommand = cli.Command{
	Name:   "config",
	Usage:  "display the container configuration",
	Action: configAction,
}
++++
++++func configAction(context *cli.Context) {
++++ container, err := loadConfig()
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ data, err := json.MarshalIndent(container, "", "\t")
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ fmt.Printf("%s", data)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "fmt"
++++ "io"
++++ "log"
++++ "os"
++++ "os/exec"
++++ "os/signal"
++++ "syscall"
++++ "text/tabwriter"
++++
++++ "github.com/codegangsta/cli"
++++ "github.com/docker/docker/pkg/term"
++++ "github.com/docker/libcontainer"
++++ consolepkg "github.com/docker/libcontainer/console"
++++ "github.com/docker/libcontainer/namespaces"
++++)
++++
// execCommand runs a command inside a container (creating it when no
// state exists); with --list it prints the registered exec functions.
var execCommand = cli.Command{
	Name:   "exec",
	Usage:  "execute a new command inside a container",
	Action: execAction,
	Flags: []cli.Flag{
		cli.BoolFlag{Name: "list", Usage: "list all registered exec functions"},
		cli.StringFlag{Name: "func", Value: "exec", Usage: "function name to exec inside a container"},
	},
}
++++
++++func execAction(context *cli.Context) {
++++ if context.Bool("list") {
++++ w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
++++ fmt.Fprint(w, "NAME\tUSAGE\n")
++++
++++ for k, f := range argvs {
++++ fmt.Fprintf(w, "%s\t%s\n", k, f.Usage)
++++ }
++++
++++ w.Flush()
++++
++++ return
++++ }
++++
++++ var exitCode int
++++
++++ container, err := loadConfig()
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ state, err := libcontainer.GetState(dataPath)
++++ if err != nil && !os.IsNotExist(err) {
++++ log.Fatalf("unable to read state.json: %s", err)
++++ }
++++
++++ if state != nil {
++++ exitCode, err = startInExistingContainer(container, state, context.String("func"), context)
++++ } else {
++++ exitCode, err = startContainer(container, dataPath, []string(context.Args()))
++++ }
++++
++++ if err != nil {
++++ log.Fatalf("failed to exec: %s", err)
++++ }
++++
++++ os.Exit(exitCode)
++++}
++++
// the process for execing a new process inside an existing container is that we have to exec ourself
// with the nsenter argument so that the C code can setns an the namespaces that we require. Then that
// code path will drop us into the path that we can do the final setup of the namespace and exec the users
// application.
//
// Returns the process exit code (or -1) and an error. All signals
// received by this process are forwarded to the container process,
// except SIGWINCH which triggers a tty resize instead.
func startInExistingContainer(config *libcontainer.Config, state *libcontainer.State, action string, context *cli.Context) (int, error) {
	var (
		master  *os.File
		console string
		err     error

		sigc = make(chan os.Signal, 10)

		stdin  = os.Stdin
		stdout = os.Stdout
		stderr = os.Stderr
	)
	signal.Notify(sigc)

	if config.Tty {
		// With a tty, the std streams go through the pty master instead
		// of being wired directly to the child.
		stdin = nil
		stdout = nil
		stderr = nil

		master, console, err = consolepkg.CreateMasterAndConsole()
		if err != nil {
			return -1, err
		}

		go io.Copy(master, os.Stdin)
		go io.Copy(os.Stdout, master)

		// NOTE(review): this `state` shadows the *libcontainer.State
		// parameter; only the saved terminal state is used below.
		state, err := term.SetRawTerminal(os.Stdin.Fd())
		if err != nil {
			return -1, err
		}

		defer term.RestoreTerminal(os.Stdin.Fd(), state)
	}

	// Once the child command is started, forward signals to it and
	// handle window resizes.
	startCallback := func(cmd *exec.Cmd) {
		go func() {
			resizeTty(master)

			for sig := range sigc {
				switch sig {
				case syscall.SIGWINCH:
					resizeTty(master)
				default:
					cmd.Process.Signal(sig)
				}
			}
		}()
	}

	return namespaces.ExecIn(config, state, context.Args(), os.Args[0], action, stdin, stdout, stderr, console, startCallback)
}
++++
// startContainer starts the container. Returns the exit status or -1 and an
// error.
//
// Signals sent to the current process will be forwarded to container.
// SIGWINCH is handled locally by resizing the container tty.
func startContainer(container *libcontainer.Config, dataPath string, args []string) (int, error) {
	var (
		cmd  *exec.Cmd
		sigc = make(chan os.Signal, 10)
	)

	signal.Notify(sigc)

	// Wrap the default command factory so the log path (if any) is
	// passed to the child via its environment. The closure also captures
	// cmd so startCallback below can forward signals to the process.
	createCommand := func(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
		cmd = namespaces.DefaultCreateCommand(container, console, dataPath, init, pipe, args)
		if logPath != "" {
			cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath))
		}
		return cmd
	}

	var (
		master  *os.File
		console string
		err     error

		stdin  = os.Stdin
		stdout = os.Stdout
		stderr = os.Stderr
	)

	if container.Tty {
		// With a tty, the std streams go through the pty master instead
		// of being wired directly to the child.
		stdin = nil
		stdout = nil
		stderr = nil

		master, console, err = consolepkg.CreateMasterAndConsole()
		if err != nil {
			return -1, err
		}

		go io.Copy(master, os.Stdin)
		go io.Copy(os.Stdout, master)

		state, err := term.SetRawTerminal(os.Stdin.Fd())
		if err != nil {
			return -1, err
		}

		defer term.RestoreTerminal(os.Stdin.Fd(), state)
	}

	// Once the container is running, forward signals and handle resizes.
	startCallback := func() {
		go func() {
			resizeTty(master)

			for sig := range sigc {
				switch sig {
				case syscall.SIGWINCH:
					resizeTty(master)
				default:
					cmd.Process.Signal(sig)
				}
			}
		}()
	}

	return namespaces.Exec(container, stdin, stdout, stderr, console, dataPath, args, createCommand, startCallback)
}
++++
++++func resizeTty(master *os.File) {
++++ if master == nil {
++++ return
++++ }
++++
++++ ws, err := term.GetWinsize(os.Stdin.Fd())
++++ if err != nil {
++++ return
++++ }
++++
++++ if err := term.SetWinsize(master.Fd(), ws); err != nil {
++++ return
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "log"
++++ "os"
++++ "runtime"
++++ "strconv"
++++
++++ "github.com/codegangsta/cli"
++++ "github.com/docker/libcontainer/namespaces"
++++ "github.com/docker/libcontainer/syncpipe"
++++)
++++
var (
	// Runtime parameters handed to nsinit by its parent via env vars.
	dataPath  = os.Getenv("data_path") // directory holding container.json / state.json
	console   = os.Getenv("console")   // console path passed to namespaces.Init
	rawPipeFd = os.Getenv("pipe")      // sync pipe fd number, as a decimal string

	// initCommand runs the init process inside the already-created namespaces.
	initCommand = cli.Command{
		Name:   "init",
		Usage:  "runs the init process inside the namespace",
		Action: initAction,
	}
)
++++
// initAction is the container init entry point: it loads the config,
// reconnects the sync pipe from the fd passed in the environment, and
// hands control to namespaces.Init. Any failure is fatal.
func initAction(context *cli.Context) {
	// Namespace setup must happen on a single OS thread.
	runtime.LockOSThread()

	container, err := loadConfig()
	if err != nil {
		log.Fatal(err)
	}

	// The working directory is used as the container rootfs.
	rootfs, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	pipeFd, err := strconv.Atoi(rawPipeFd)
	if err != nil {
		log.Fatal(err)
	}

	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(pipeFd))
	if err != nil {
		log.Fatalf("unable to create sync pipe: %s", err)
	}

	if err := namespaces.Init(container, rootfs, console, syncPipe, []string(context.Args())); err != nil {
		log.Fatalf("unable to initialize for container: %s", err)
	}
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "log"
++++ "os"
++++ "strings"
++++
++++ "github.com/codegangsta/cli"
++++)
++++
var (
	logPath = os.Getenv("log")        // optional log file path (see preload/openLog)
	argvs   = make(map[string]*rFunc) // argv[0]-dispatched functions, registered in init()
)
++++
++++func init() {
++++ argvs["exec"] = &rFunc{
++++ Usage: "execute a process inside an existing container",
++++ Action: nsenterExec,
++++ }
++++
++++ argvs["mknod"] = &rFunc{
++++ Usage: "mknod a device inside an existing container",
++++ Action: nsenterMknod,
++++ }
++++
++++ argvs["ip"] = &rFunc{
++++ Usage: "display the container's network interfaces",
++++ Action: nsenterIp,
++++ }
++++}
++++
++++func main() {
++++ // we need to check our argv 0 for any registred functions to run instead of the
++++ // normal cli code path
++++ f, exists := argvs[strings.TrimPrefix(os.Args[0], "nsenter-")]
++++ if exists {
++++ runFunc(f)
++++
++++ return
++++ }
++++
++++ app := cli.NewApp()
++++
++++ app.Name = "nsinit"
++++ app.Version = "0.1"
++++ app.Author = "libcontainer maintainers"
++++ app.Flags = []cli.Flag{
++++ cli.StringFlag{Name: "nspid"},
++++ cli.StringFlag{Name: "console"},
++++ }
++++
++++ app.Before = preload
++++
++++ app.Commands = []cli.Command{
++++ execCommand,
++++ initCommand,
++++ statsCommand,
++++ configCommand,
++++ pauseCommand,
++++ unpauseCommand,
++++ }
++++
++++ if err := app.Run(os.Args); err != nil {
++++ log.Fatal(err)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "fmt"
++++ "log"
++++ "net"
++++ "os"
++++ "strconv"
++++ "strings"
++++ "text/tabwriter"
++++
++++ "github.com/docker/libcontainer"
++++ "github.com/docker/libcontainer/devices"
++++ "github.com/docker/libcontainer/mount/nodes"
++++ "github.com/docker/libcontainer/namespaces"
++++ _ "github.com/docker/libcontainer/namespaces/nsenter"
++++)
++++
// nsenterExec exec's a process inside an existing container
// NOTE(review): FinalizeSetns presumably replaces this process on
// success and only returns on failure — confirm against namespaces.
func nsenterExec(config *libcontainer.Config, args []string) {
	if err := namespaces.FinalizeSetns(config, args); err != nil {
		log.Fatalf("failed to nsenter: %s", err)
	}
}
++++
++++// nsenterMknod runs mknod inside an existing container
++++//
++++// mknod <path> <type> <major> <minor>
++++func nsenterMknod(config *libcontainer.Config, args []string) {
++++ if len(args) != 4 {
++++ log.Fatalf("expected mknod to have 4 arguments not %d", len(args))
++++ }
++++
++++ t := rune(args[1][0])
++++
++++ major, err := strconv.Atoi(args[2])
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ minor, err := strconv.Atoi(args[3])
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ n := &devices.Device{
++++ Path: args[0],
++++ Type: t,
++++ MajorNumber: int64(major),
++++ MinorNumber: int64(minor),
++++ }
++++
++++ if err := nodes.CreateDeviceNode("/", n); err != nil {
++++ log.Fatal(err)
++++ }
++++}
++++
++++// nsenterIp displays the network interfaces inside a container's net namespace
++++func nsenterIp(config *libcontainer.Config, args []string) {
++++ interfaces, err := net.Interfaces()
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
++++ fmt.Fprint(w, "NAME\tMTU\tMAC\tFLAG\tADDRS\n")
++++
++++ for _, iface := range interfaces {
++++ addrs, err := iface.Addrs()
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ o := []string{}
++++
++++ for _, a := range addrs {
++++ o = append(o, a.String())
++++ }
++++
++++ fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", iface.Name, iface.MTU, iface.HardwareAddr, iface.Flags, strings.Join(o, ","))
++++ }
++++
++++ w.Flush()
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "log"
++++
++++ "github.com/codegangsta/cli"
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/cgroups/fs"
++++ "github.com/docker/libcontainer/cgroups/systemd"
++++)
++++
// pauseCommand freezes all processes in the container.
var pauseCommand = cli.Command{
	Name:   "pause",
	Usage:  "pause the container's processes",
	Action: pauseAction,
}

// unpauseCommand thaws a previously paused container.
var unpauseCommand = cli.Command{
	Name:   "unpause",
	Usage:  "unpause the container's processes",
	Action: unpauseAction,
}
++++
// pauseAction freezes the container's processes via the freezer cgroup.
func pauseAction(context *cli.Context) {
	if err := toggle(cgroups.Frozen); err != nil {
		log.Fatal(err)
	}
}

// unpauseAction thaws the container's processes via the freezer cgroup.
func unpauseAction(context *cli.Context) {
	if err := toggle(cgroups.Thawed); err != nil {
		log.Fatal(err)
	}
}
++++
++++func toggle(state cgroups.FreezerState) error {
++++ container, err := loadConfig()
++++ if err != nil {
++++ return err
++++ }
++++
++++ if systemd.UseSystemd() {
++++ err = systemd.Freeze(container.Cgroups, state)
++++ } else {
++++ err = fs.Freeze(container.Cgroups, state)
++++ }
++++
++++ return err
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "encoding/json"
++++ "fmt"
++++ "log"
++++
++++ "github.com/codegangsta/cli"
++++ "github.com/docker/libcontainer"
++++)
++++
// statsCommand prints the container's runtime statistics as JSON.
var statsCommand = cli.Command{
	Name:   "stats",
	Usage:  "display statistics for the container",
	Action: statsAction,
}
++++
++++func statsAction(context *cli.Context) {
++++ container, err := loadConfig()
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ state, err := libcontainer.GetState(dataPath)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ stats, err := libcontainer.GetStats(container, state)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ data, err := json.MarshalIndent(stats, "", "\t")
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ fmt.Printf("%s", data)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "encoding/json"
++++ "log"
++++ "os"
++++ "path/filepath"
++++
++++ "github.com/codegangsta/cli"
++++ "github.com/docker/libcontainer"
++++ "github.com/docker/libcontainer/syncpipe"
++++)
++++
// rFunc is a function registration for calling after an execin
type rFunc struct {
	// Usage is the one-line help text shown by `exec --list`.
	Usage string
	// Action is invoked with the loaded container config and the user
	// args that follow "--" on the command line (see runFunc).
	Action func(*libcontainer.Config, []string)
}
++++
++++func loadConfig() (*libcontainer.Config, error) {
++++ f, err := os.Open(filepath.Join(dataPath, "container.json"))
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++
++++ var container *libcontainer.Config
++++ if err := json.NewDecoder(f).Decode(&container); err != nil {
++++ return nil, err
++++ }
++++
++++ return container, nil
++++}
++++
++++func openLog(name string) error {
++++ f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
++++ if err != nil {
++++ return err
++++ }
++++
++++ log.SetOutput(f)
++++
++++ return nil
++++}
++++
++++func findUserArgs() []string {
++++ i := 0
++++ for _, a := range os.Args {
++++ i++
++++
++++ if a == "--" {
++++ break
++++ }
++++ }
++++
++++ return os.Args[i:]
++++}
++++
++++// loadConfigFromFd loads a container's config from the sync pipe that is provided by
++++// fd 3 when running a process
++++func loadConfigFromFd() (*libcontainer.Config, error) {
++++ syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ var config *libcontainer.Config
++++ if err := syncPipe.ReadFromParent(&config); err != nil {
++++ return nil, err
++++ }
++++
++++ return config, nil
++++}
++++
++++func preload(context *cli.Context) error {
++++ if logPath != "" {
++++ if err := openLog(logPath); err != nil {
++++ return err
++++ }
++++ }
++++
++++ return nil
++++}
++++
++++func runFunc(f *rFunc) {
++++ userArgs := findUserArgs()
++++
++++ config, err := loadConfigFromFd()
++++ if err != nil {
++++ log.Fatalf("unable to receive config from sync pipe: %s", err)
++++ }
++++
++++ f.Action(config, userArgs)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
++++import "io"
++++
// ProcessConfig is the configuration for a process to be run inside a container.
type ProcessConfig struct {
	// The command to be run followed by any arguments.
	Args []string

	// Environment variables for the process. Despite the original "map"
	// wording this is a slice; by convention each entry is one
	// "KEY=value" string — TODO confirm against the exec path.
	Env []string

	// Stdin is a pointer to a reader which provides the standard input stream.
	// Stdout is a pointer to a writer which receives the standard output stream.
	// Stderr is a pointer to a writer which receives the standard error stream.
	//
	// If a reader or writer is nil, the input stream is assumed to be empty and the output is
	// discarded.
	//
	// The readers and writers, if supplied, are closed when the process terminates. Their Close
	// methods should be idempotent.
	//
	// Stdout and Stderr may refer to the same writer in which case the output is interspersed.
	Stdin  io.ReadCloser
	Stdout io.WriteCloser
	Stderr io.WriteCloser
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++These configuration files can be used with `nsinit` to quickly develop, test,
++++and experiment with features of libcontainer.
++++
When consuming these configuration files, copy the one you want into your
rootfs and rename it to `container.json` so that `nsinit` can find it.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++{
++++ "capabilities": [
++++ "CHOWN",
++++ "DAC_OVERRIDE",
++++ "FOWNER",
++++ "MKNOD",
++++ "NET_RAW",
++++ "SETGID",
++++ "SETUID",
++++ "SETFCAP",
++++ "SETPCAP",
++++ "NET_BIND_SERVICE",
++++ "SYS_CHROOT",
++++ "KILL"
++++ ],
++++ "cgroups": {
++++ "allowed_devices": [
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 98
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 1,
++++ "path": "/dev/console",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "path": "/dev/tty0",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "minor_number": 1,
++++ "path": "/dev/tty1",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 136,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 2,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 10,
++++ "minor_number": 200,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ],
++++ "name": "docker-koye",
++++ "parent": "docker"
++++ },
++++ "restrict_sys": true,
++++ "apparmor_profile": "docker-default",
++++ "mount_config": {
++++ "device_nodes": [
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ]
++++ },
++++ "environment": [
++++ "HOME=/",
++++ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
++++ "HOSTNAME=koye",
++++ "TERM=xterm"
++++ ],
++++ "hostname": "koye",
++++ "namespaces": {
++++ "NEWIPC": true,
++++ "NEWNET": true,
++++ "NEWNS": true,
++++ "NEWPID": true,
++++ "NEWUTS": true
++++ },
++++ "networks": [
++++ {
++++ "address": "127.0.0.1/0",
++++ "gateway": "localhost",
++++ "mtu": 1500,
++++ "type": "loopback"
++++ }
++++ ],
++++ "tty": true,
++++ "user": "daemon"
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++{
++++ "capabilities": [
++++ "CHOWN",
++++ "DAC_OVERRIDE",
++++ "FOWNER",
++++ "MKNOD",
++++ "NET_RAW",
++++ "SETGID",
++++ "SETUID",
++++ "SETFCAP",
++++ "SETPCAP",
++++ "NET_BIND_SERVICE",
++++ "SYS_CHROOT",
++++ "KILL"
++++ ],
++++ "cgroups": {
++++ "allowed_devices": [
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 98
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 1,
++++ "path": "/dev/console",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "path": "/dev/tty0",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "minor_number": 1,
++++ "path": "/dev/tty1",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 136,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 2,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 10,
++++ "minor_number": 200,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ],
++++ "name": "docker-koye",
++++ "parent": "docker"
++++ },
++++ "restrict_sys": true,
++++ "mount_config": {
++++ "device_nodes": [
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ]
++++ },
++++ "environment": [
++++ "HOME=/",
++++ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
++++ "HOSTNAME=koye",
++++ "TERM=xterm"
++++ ],
++++ "hostname": "koye",
++++ "namespaces": {
++++ "NEWIPC": true,
++++ "NEWNET": true,
++++ "NEWNS": true,
++++ "NEWPID": true,
++++ "NEWUTS": true
++++ },
++++ "networks": [
++++ {
++++ "address": "127.0.0.1/0",
++++ "gateway": "localhost",
++++ "mtu": 1500,
++++ "type": "loopback"
++++ },
++++ {
++++ "address": "172.17.0.101/16",
++++ "bridge": "docker0",
++++ "veth_prefix": "veth",
++++ "gateway": "172.17.42.1",
++++ "mtu": 1500,
++++ "type": "veth"
++++ }
++++ ],
++++ "tty": true
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++{
++++ "capabilities": [
++++ "CHOWN",
++++ "DAC_OVERRIDE",
++++ "FOWNER",
++++ "MKNOD",
++++ "NET_RAW",
++++ "SETGID",
++++ "SETUID",
++++ "SETFCAP",
++++ "SETPCAP",
++++ "NET_BIND_SERVICE",
++++ "SYS_CHROOT",
++++ "KILL"
++++ ],
++++ "cgroups": {
++++ "allowed_devices": [
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 98
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 1,
++++ "path": "/dev/console",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "path": "/dev/tty0",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "minor_number": 1,
++++ "path": "/dev/tty1",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 136,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 2,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 10,
++++ "minor_number": 200,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ],
++++ "name": "docker-koye",
++++ "parent": "docker"
++++ },
++++ "restrict_sys": true,
++++ "mount_config": {
++++ "device_nodes": [
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ],
++++ "mounts": [
++++ {
++++ "type": "tmpfs",
++++ "destination": "/tmp"
++++ }
++++ ]
++++ },
++++ "environment": [
++++ "HOME=/",
++++ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
++++ "HOSTNAME=koye",
++++ "TERM=xterm"
++++ ],
++++ "hostname": "koye",
++++ "namespaces": {
++++ "NEWIPC": true,
++++ "NEWNET": true,
++++ "NEWNS": true,
++++ "NEWPID": true,
++++ "NEWUTS": true
++++ },
++++ "networks": [
++++ {
++++ "address": "127.0.0.1/0",
++++ "gateway": "localhost",
++++ "mtu": 1500,
++++ "type": "loopback"
++++ }
++++ ],
++++ "tty": true,
++++ "user": "daemon"
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++{
++++ "capabilities": [
++++ "CHOWN",
++++ "DAC_OVERRIDE",
++++ "FOWNER",
++++ "MKNOD",
++++ "NET_RAW",
++++ "SETGID",
++++ "SETUID",
++++ "SETFCAP",
++++ "SETPCAP",
++++ "NET_BIND_SERVICE",
++++ "SYS_CHROOT",
++++ "KILL"
++++ ],
++++ "cgroups": {
++++ "allowed_devices": [
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "m",
++++ "major_number": -1,
++++ "minor_number": -1,
++++ "type": 98
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 1,
++++ "path": "/dev/console",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "path": "/dev/tty0",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 4,
++++ "minor_number": 1,
++++ "path": "/dev/tty1",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 136,
++++ "minor_number": -1,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 5,
++++ "minor_number": 2,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "major_number": 10,
++++ "minor_number": 200,
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ],
++++ "name": "docker-koye",
++++ "parent": "docker"
++++ },
++++ "restrict_sys": true,
++++ "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475",
++++ "mount_config": {
++++ "mount_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475",
++++ "device_nodes": [
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 3,
++++ "path": "/dev/null",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 5,
++++ "path": "/dev/zero",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 7,
++++ "path": "/dev/full",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 5,
++++ "path": "/dev/tty",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 9,
++++ "path": "/dev/urandom",
++++ "type": 99
++++ },
++++ {
++++ "cgroup_permissions": "rwm",
++++ "file_mode": 438,
++++ "major_number": 1,
++++ "minor_number": 8,
++++ "path": "/dev/random",
++++ "type": 99
++++ }
++++ ]
++++ },
++++ "environment": [
++++ "HOME=/",
++++ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
++++ "HOSTNAME=koye",
++++ "TERM=xterm"
++++ ],
++++ "hostname": "koye",
++++ "namespaces": {
++++ "NEWIPC": true,
++++ "NEWNET": true,
++++ "NEWNS": true,
++++ "NEWPID": true,
++++ "NEWUTS": true
++++ },
++++ "networks": [
++++ {
++++ "address": "127.0.0.1/0",
++++ "gateway": "localhost",
++++ "mtu": 1500,
++++ "type": "loopback"
++++ }
++++ ],
++++ "tty": true,
++++ "user": "daemon"
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package capabilities
++++
++++import (
++++ "os"
++++
++++ "github.com/syndtr/gocapability/capability"
++++)
++++
++++const allCapabilityTypes = capability.CAPS | capability.BOUNDS
++++
++++// DropBoundingSet drops the capability bounding set to those specified in the
++++// container configuration.
++++func DropBoundingSet(capabilities []string) error {
++++ c, err := capability.NewPid(os.Getpid())
++++ if err != nil {
++++ return err
++++ }
++++
++++ keep := getEnabledCapabilities(capabilities)
++++ c.Clear(capability.BOUNDS)
++++ c.Set(capability.BOUNDS, keep...)
++++
++++ if err := c.Apply(capability.BOUNDS); err != nil {
++++ return err
++++ }
++++
++++ return nil
++++}
++++
++++// DropCapabilities drops all capabilities for the current process except those specified in the container configuration.
++++func DropCapabilities(capList []string) error {
++++ c, err := capability.NewPid(os.Getpid())
++++ if err != nil {
++++ return err
++++ }
++++
++++ keep := getEnabledCapabilities(capList)
++++ c.Clear(allCapabilityTypes)
++++ c.Set(allCapabilityTypes, keep...)
++++
++++ if err := c.Apply(allCapabilityTypes); err != nil {
++++ return err
++++ }
++++ return nil
++++}
++++
++++// getEnabledCapabilities returns the capabilities that should not be dropped by the container.
++++func getEnabledCapabilities(capList []string) []capability.Cap {
++++ keep := []capability.Cap{}
++++ for _, capability := range capList {
++++ if c := GetCapability(capability); c != nil {
++++ keep = append(keep, c.Value)
++++ }
++++ }
++++ return keep
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package capabilities
++++
++++import "github.com/syndtr/gocapability/capability"
++++
type (
	// CapabilityMapping associates a short capability name (e.g. "CHOWN",
	// without the CAP_ prefix) with its gocapability constant.
	CapabilityMapping struct {
		Key   string         `json:"key,omitempty"`
		Value capability.Cap `json:"value,omitempty"`
	}
	// Capabilities is a list of capability mappings.
	Capabilities []*CapabilityMapping
)
++++
// String returns the capability's short name (its Key, without the CAP_ prefix).
func (c *CapabilityMapping) String() string {
	return c.Key
}
++++
++++func GetCapability(key string) *CapabilityMapping {
++++ for _, capp := range capabilityList {
++++ if capp.Key == key {
++++ cpy := *capp
++++ return &cpy
++++ }
++++ }
++++ return nil
++++}
++++
++++func GetAllCapabilities() []string {
++++ output := make([]string, len(capabilityList))
++++ for i, capability := range capabilityList {
++++ output[i] = capability.String()
++++ }
++++ return output
++++}
++++
// contains returns true if the specified capability name is
// in the slice.
func (c Capabilities) contains(capp string) bool {
	return c.get(capp) != nil
}
++++
++++func (c Capabilities) get(capp string) *CapabilityMapping {
++++ for _, cap := range c {
++++ if cap.Key == capp {
++++ return cap
++++ }
++++ }
++++ return nil
++++}
++++
// capabilityList enumerates every capability this package can translate from
// its short name to the corresponding gocapability constant.
var capabilityList = Capabilities{
	{Key: "SETPCAP", Value: capability.CAP_SETPCAP},
	{Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
	{Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
	{Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
	{Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
	{Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
	{Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
	{Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
	{Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
	{Key: "MKNOD", Value: capability.CAP_MKNOD},
	{Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
	{Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
	{Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
	{Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
	{Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
	{Key: "SYSLOG", Value: capability.CAP_SYSLOG},
	{Key: "CHOWN", Value: capability.CAP_CHOWN},
	{Key: "NET_RAW", Value: capability.CAP_NET_RAW},
	{Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
	{Key: "FOWNER", Value: capability.CAP_FOWNER},
	{Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
	{Key: "FSETID", Value: capability.CAP_FSETID},
	{Key: "KILL", Value: capability.CAP_KILL},
	{Key: "SETGID", Value: capability.CAP_SETGID},
	{Key: "SETUID", Value: capability.CAP_SETUID},
	{Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
	{Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
	{Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
	{Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
	{Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
	{Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
	{Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
	{Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
	{Key: "LEASE", Value: capability.CAP_LEASE},
	{Key: "SETFCAP", Value: capability.CAP_SETFCAP},
	{Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
	{Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package capabilities
++++
++++import (
++++ "testing"
++++)
++++
// TestCapabilitiesContains verifies that Capabilities.contains reports
// membership by capability key for a small hand-built list.
func TestCapabilitiesContains(t *testing.T) {
	caps := Capabilities{
		GetCapability("MKNOD"),
		GetCapability("SETPCAP"),
	}

	// SYS_ADMIN was not added above, so it must not be reported present.
	if caps.contains("SYS_ADMIN") {
		t.Fatal("capabilities should not contain SYS_ADMIN")
	}
	if !caps.contains("MKNOD") {
		t.Fatal("capabilities should contain MKNOD but does not")
	}
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package restrict
++++
++++import (
++++ "fmt"
++++ "os"
++++ "syscall"
++++ "time"
++++)
++++
++++const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
++++
// mountReadonly remounts path read-only, retrying up to five times while the
// mount point reports EBUSY. A plain remount failing with EINVAL (path is
// probably not a mountpoint) is handled by bind-mounting the path over
// itself and remounting that bind read-only with the default hardening flags.
// A nonexistent path is treated as success.
func mountReadonly(path string) error {
	for i := 0; i < 5; i++ {
		if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) {
			switch err {
			case syscall.EINVAL:
				// Probably not a mountpoint, use bind-mount
				if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil {
					return err
				}

				return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "")
			case syscall.EBUSY:
				// Busy mount: back off briefly and retry the remount.
				time.Sleep(100 * time.Millisecond)
				continue
			default:
				return err
			}
		}

		// Remount succeeded (or path does not exist) — done.
		return nil
	}

	return fmt.Errorf("unable to mount %s as readonly max retries reached", path)
}
++++
++++// This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts).
++++// However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes).
++++func Restrict(mounts ...string) error {
++++ for _, dest := range mounts {
++++ if err := mountReadonly(dest); err != nil {
++++ return fmt.Errorf("unable to remount %s readonly: %s", dest, err)
++++ }
++++ }
++++
++++ if err := syscall.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
++++ return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err)
++++ }
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !linux
++++
++++package restrict
++++
++++import "fmt"
++++
// Restrict always fails on non-linux platforms because mount-based
// restrictions are linux-only. It accepts the same variadic mounts
// parameter as the linux implementation so that cross-platform callers
// compile against either build; the original zero-argument signature
// diverged from the linux variant's Restrict(mounts ...string).
func Restrict(mounts ...string) error {
	return fmt.Errorf("not supported")
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package selinux
++++
++++import (
++++ "bufio"
++++ "crypto/rand"
++++ "encoding/binary"
++++ "fmt"
++++ "io"
++++ "os"
++++ "path/filepath"
++++ "regexp"
++++ "strconv"
++++ "strings"
++++ "syscall"
++++
++++ "github.com/docker/docker/pkg/mount"
++++ "github.com/docker/libcontainer/system"
++++)
++++
const (
	// Kernel/config enforcement states.
	Enforcing  = 1  // SELinux enforces policy
	Permissive = 0  // SELinux logs but does not enforce
	Disabled   = -1 // SELinux is disabled

	selinuxDir       = "/etc/selinux/"
	selinuxConfig    = selinuxDir + "config"
	selinuxTypeTag   = "SELINUXTYPE"
	selinuxTag       = "SELINUX"
	selinuxPath      = "/sys/fs/selinux"
	xattrNameSelinux = "security.selinux"
	stRdOnly         = 0x01 // ST_RDONLY: filesystem mounted read-only
)
++++
var (
	// assignRegex matches "KEY=value" config lines.
	assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
	// spaceRegex matches "KEY value" lines; not referenced in the code
	// visible here — presumably used elsewhere in this file.
	spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`)
	// mcsList tracks MCS labels handed out so far, to keep them unique.
	mcsList = make(map[string]bool)
	// selinuxfs caches the selinuxfs mountpoint; "unknown" means not yet probed.
	selinuxfs = "unknown"
	// selinuxEnabled/-Checked cache the result of SelinuxEnabled.
	selinuxEnabled        = false
	selinuxEnabledChecked = false
)
++++
// SELinuxContext is a parsed SELinux security context, keyed by
// "user", "role", "type" and "level".
type SELinuxContext map[string]string

// SetDisabled disables selinux support for the package
func SetDisabled() {
	// Mark the cached check as done so SelinuxEnabled never re-probes.
	selinuxEnabled, selinuxEnabledChecked = false, true
}
++++
// getSelinuxMountPoint returns the selinuxfs mountpoint, caching the result
// in the package-level selinuxfs variable ("" meaning no usable mount).
// A selinuxfs mounted read-only is treated as unusable.
func getSelinuxMountPoint() string {
	if selinuxfs != "unknown" {
		return selinuxfs
	}
	// Default to "no usable mount" before probing.
	selinuxfs = ""

	mounts, err := mount.GetMounts()
	if err != nil {
		return selinuxfs
	}
	for _, mount := range mounts {
		if mount.Fstype == "selinuxfs" {
			selinuxfs = mount.Mountpoint
			break
		}
	}
	if selinuxfs != "" {
		var buf syscall.Statfs_t
		// Statfs error is ignored; buf.Flags would then be zero.
		syscall.Statfs(selinuxfs, &buf)
		if (buf.Flags & stRdOnly) == 1 {
			selinuxfs = ""
		}
	}
	return selinuxfs
}
++++
// SelinuxEnabled reports whether SELinux is usable on this system. The check
// runs once and is cached; a process whose context reads "kernel" is treated
// as SELinux not being set up.
func SelinuxEnabled() bool {
	if selinuxEnabledChecked {
		return selinuxEnabled
	}
	selinuxEnabledChecked = true
	if fs := getSelinuxMountPoint(); fs != "" {
		if con, _ := Getcon(); con != "kernel" {
			selinuxEnabled = true
		}
	}
	return selinuxEnabled
}
++++
// readConfig scans /etc/selinux/config for a "target=value" assignment and
// returns the value with surrounding double quotes stripped. Returns "" when
// the file cannot be read, a read error occurs, or the key is absent.
func readConfig(target string) (value string) {
	var (
		val, key string
		bufin    *bufio.Reader
	)

	in, err := os.Open(selinuxConfig)
	if err != nil {
		return ""
	}
	defer in.Close()

	bufin = bufio.NewReader(in)

	for done := false; !done; {
		var line string
		if line, err = bufin.ReadString('\n'); err != nil {
			if err != io.EOF {
				return ""
			}
			// EOF: still process the final (unterminated) line below.
			done = true
		}
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			// Skip blank lines
			continue
		}
		if line[0] == ';' || line[0] == '#' {
			// Skip comments
			continue
		}
		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
			key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
			if key == target {
				return strings.Trim(val, "\"")
			}
		}
	}
	return ""
}
++++
// getSELinuxPolicyRoot returns the directory of the active policy, e.g.
// "/etc/selinux/targeted" (selinuxDir already ends with a slash).
func getSELinuxPolicyRoot() string {
	return selinuxDir + readConfig(selinuxTypeTag)
}
++++
// readCon reads a single whitespace-delimited token (an SELinux context)
// from the file at name.
func readCon(name string) (string, error) {
	var val string

	in, err := os.Open(name)
	if err != nil {
		return "", err
	}
	defer in.Close()

	// %s stops at whitespace, so only the first token of the file is read.
	_, err = fmt.Fscanf(in, "%s", &val)
	return val, err
}
++++
// Setfilecon sets the SELinux label (security.selinux xattr) on path.
func Setfilecon(path string, scon string) error {
	return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
}

// Return the SELinux label for this path
func Getfilecon(path string) (string, error) {
	con, err := system.Lgetxattr(path, xattrNameSelinux)
	// NOTE(review): con is converted to string even when err != nil;
	// callers should still check the returned error.
	return string(con), err
}

// Setfscreatecon writes scon to this thread's /proc attr/fscreate file,
// the context applied to filesystem objects it subsequently creates.
func Setfscreatecon(scon string) error {
	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
}

// Getfscreatecon reads this thread's attr/fscreate context back.
func Getfscreatecon() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
}

// Return the SELinux label of the current process thread.
func Getcon() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
}

// Getpidcon returns the SELinux label of the process with the given pid.
func Getpidcon(pid int) (string, error) {
	return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
}

// Getexeccon reads this thread's attr/exec context (applied on next exec).
func Getexeccon() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}

// writeCon writes val to the procfs attribute file at name; an empty val
// performs an empty write, which clears the attribute.
func writeCon(name string, val string) error {
	out, err := os.OpenFile(name, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer out.Close()

	if val != "" {
		_, err = out.Write([]byte(val))
	} else {
		_, err = out.Write(nil)
	}
	return err
}

// Setexeccon writes scon to this thread's attr/exec file, the context the
// next exec'd program will run with.
func Setexeccon(scon string) error {
	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
}
++++
// Get renders the context in the canonical "user:role:type:level" form.
func (c SELinuxContext) Get() string {
	return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
}

// NewContext parses a "user:role:type:level" string into an SELinuxContext.
// NOTE(review): a non-empty scon with fewer than four ":"-separated fields
// will panic on the index accesses — confirm callers always pass full contexts.
func NewContext(scon string) SELinuxContext {
	c := make(SELinuxContext)

	if len(scon) != 0 {
		con := strings.SplitN(scon, ":", 4)
		c["user"] = con[0]
		c["role"] = con[1]
		c["type"] = con[2]
		c["level"] = con[3]
	}
	return c
}

// ReserveLabel records the MCS level of scon as in-use so it will not be
// handed out again. The mcsAdd error (label already reserved) is ignored.
func ReserveLabel(scon string) {
	if len(scon) != 0 {
		con := strings.SplitN(scon, ":", 4)
		mcsAdd(con[3])
	}
}
++++
++++func SelinuxGetEnforce() int {
++++ var enforce int
++++
++++ enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath))
++++ if err != nil {
++++ return -1
++++ }
++++
++++ enforce, err = strconv.Atoi(string(enforceS))
++++ if err != nil {
++++ return -1
++++ }
++++ return enforce
++++}
++++
// SelinuxGetEnforceMode returns the mode configured in /etc/selinux/config
// (as opposed to the kernel's live state): Enforcing, Permissive, or
// Disabled for any other value, including an unreadable config.
func SelinuxGetEnforceMode() int {
	switch readConfig(selinuxTag) {
	case "enforcing":
		return Enforcing
	case "permissive":
		return Permissive
	}
	return Disabled
}
++++
// mcsAdd reserves the MCS label mcs, failing if it is already in use.
func mcsAdd(mcs string) error {
	if mcsList[mcs] {
		return fmt.Errorf("MCS Label already exists")
	}
	mcsList[mcs] = true
	return nil
}
++++
++++func mcsDelete(mcs string) {
++++ mcsList[mcs] = false
++++}
++++
// mcsExists reports whether the MCS label mcs is currently reserved.
func mcsExists(mcs string) bool {
	return mcsList[mcs]
}
++++
// IntToMcs maps an ordinal id onto a unique "s0:cX,cY" MCS label built from
// an unordered pair of categories drawn from [0, catRange). Returns "" for
// ids outside 1..523776 (the pair count for 1024 categories).
// NOTE(review): for small catRange the walk can exhaust tiers — confirm
// callers only use it with catRange large enough for their ids.
func IntToMcs(id int, catRange uint32) string {
	if id < 1 || id > 523776 {
		return ""
	}

	setSize := int(catRange)
	tier := setSize
	ord := id

	// Walk down the triangular tiers until ord lands inside one.
	for ord > tier {
		ord -= tier
		tier--
	}
	tier = setSize - tier
	ord += tier
	return fmt.Sprintf("s0:c%d,c%d", tier, ord)
}
++++
++++func uniqMcs(catRange uint32) string {
++++ var (
++++ n uint32
++++ c1, c2 uint32
++++ mcs string
++++ )
++++
++++ for {
++++ binary.Read(rand.Reader, binary.LittleEndian, &n)
++++ c1 = n % catRange
++++ binary.Read(rand.Reader, binary.LittleEndian, &n)
++++ c2 = n % catRange
++++ if c1 == c2 {
++++ continue
++++ } else {
++++ if c1 > c2 {
++++ t := c1
++++ c1 = c2
++++ c2 = t
++++ }
++++ }
++++ mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
++++ if err := mcsAdd(mcs); err != nil {
++++ continue
++++ }
++++ break
++++ }
++++ return mcs
++++}
++++
++++func FreeLxcContexts(scon string) {
++++ if len(scon) != 0 {
++++ con := strings.SplitN(scon, ":", 4)
++++ mcsDelete(con[3])
++++ }
++++}
++++
// GetLxcContexts reads the distribution's lxc_contexts policy file and
// returns a (processLabel, fileLabel) pair whose MCS level has been
// replaced with a freshly reserved unique level. Both strings are empty
// when SELinux is disabled or the policy file cannot be opened.
func GetLxcContexts() (processLabel string, fileLabel string) {
	var (
		val, key string
		bufin    *bufio.Reader
	)

	if !SelinuxEnabled() {
		return "", ""
	}
	lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
	in, err := os.Open(lxcPath)
	if err != nil {
		return "", ""
	}
	defer in.Close()

	bufin = bufio.NewReader(in)

	// Scan the file line by line, collecting the "process" and "file"
	// key=value assignments (values may be double-quoted).
	for done := false; !done; {
		var line string
		if line, err = bufin.ReadString('\n'); err != nil {
			if err == io.EOF {
				done = true
			} else {
				// NOTE(review): a non-EOF read error jumps straight to
				// exit, skipping the check below that both labels were
				// found — it can relabel with an empty component; confirm
				// this is intended.
				goto exit
			}
		}
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			// Skip blank lines
			continue
		}
		if line[0] == ';' || line[0] == '#' {
			// Skip comments
			continue
		}
		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
			key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
			if key == "process" {
				processLabel = strings.Trim(val, "\"")
			}
			if key == "file" {
				fileLabel = strings.Trim(val, "\"")
			}
		}
	}

	// Both entries must have been present for the labels to be usable.
	if processLabel == "" || fileLabel == "" {
		return "", ""
	}

exit:
	// Replace the policy's level with a unique MCS level shared by the
	// process and file labels.
	// mcs := IntToMcs(os.Getpid(), 1024)
	mcs := uniqMcs(1024)
	scon := NewContext(processLabel)
	scon["level"] = mcs
	processLabel = scon.Get()
	scon = NewContext(fileLabel)
	scon["level"] = mcs
	fileLabel = scon.Get()
	return processLabel, fileLabel
}
++++
++++func SecurityCheckContext(val string) error {
++++ return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
++++}
++++
++++func CopyLevel(src, dest string) (string, error) {
++++ if src == "" {
++++ return "", nil
++++ }
++++ if err := SecurityCheckContext(src); err != nil {
++++ return "", err
++++ }
++++ if err := SecurityCheckContext(dest); err != nil {
++++ return "", err
++++ }
++++ scon := NewContext(src)
++++ tcon := NewContext(dest)
++++ mcsDelete(tcon["level"])
++++ mcsAdd(scon["level"])
++++ tcon["level"] = scon["level"]
++++ return tcon.Get(), nil
++++}
++++
// badPrefix rejects paths that live under protected system locations
// (currently only /usr) which must never be relabeled.
func badPrefix(fpath string) error {
	for _, prefix := range []string{"/usr"} {
		if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
			return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
		}
	}
	return nil
}
++++
++++// Change the fpath file object to the SELinux label scon.
++++// If the fpath is a directory and recurse is true Chcon will walk the
++++// directory tree setting the label
++++func Chcon(fpath string, scon string, recurse bool) error {
++++ if scon == "" {
++++ return nil
++++ }
++++ if err := badPrefix(fpath); err != nil {
++++ return err
++++ }
++++ callback := func(p string, info os.FileInfo, err error) error {
++++ return Setfilecon(p, scon)
++++ }
++++
++++ if recurse {
++++ return filepath.Walk(fpath, callback)
++++ }
++++
++++ return Setfilecon(fpath, scon)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package selinux_test
++++
++++import (
++++ "os"
++++ "testing"
++++
++++ "github.com/docker/libcontainer/selinux"
++++)
++++
++++func testSetfilecon(t *testing.T) {
++++ if selinux.SelinuxEnabled() {
++++ tmp := "selinux_test"
++++ out, _ := os.OpenFile(tmp, os.O_WRONLY, 0)
++++ out.Close()
++++ err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0")
++++ if err != nil {
++++ t.Log("Setfilecon failed")
++++ t.Fatal(err)
++++ }
++++ os.Remove(tmp)
++++ }
++++}
++++
++++func TestSELinux(t *testing.T) {
++++ var (
++++ err error
++++ plabel, flabel string
++++ )
++++
++++ if selinux.SelinuxEnabled() {
++++ t.Log("Enabled")
++++ plabel, flabel = selinux.GetLxcContexts()
++++ t.Log(plabel)
++++ t.Log(flabel)
++++ selinux.FreeLxcContexts(plabel)
++++ plabel, flabel = selinux.GetLxcContexts()
++++ t.Log(plabel)
++++ t.Log(flabel)
++++ selinux.FreeLxcContexts(plabel)
++++ t.Log("getenforce ", selinux.SelinuxGetEnforce())
++++ t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode())
++++ pid := os.Getpid()
++++ t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023))
++++ err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0")
++++ if err == nil {
++++ t.Log(selinux.Getfscreatecon())
++++ } else {
++++ t.Log("setfscreatecon failed", err)
++++ t.Fatal(err)
++++ }
++++ err = selinux.Setfscreatecon("")
++++ if err == nil {
++++ t.Log(selinux.Getfscreatecon())
++++ } else {
++++ t.Log("setfscreatecon failed", err)
++++ t.Fatal(err)
++++ }
++++ t.Log(selinux.Getpidcon(1))
++++ } else {
++++ t.Log("Disabled")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
++++import (
++++ "encoding/json"
++++ "os"
++++ "path/filepath"
++++
++++ "github.com/docker/libcontainer/network"
++++)
++++
// State represents a running container's state, persisted as state.json
// by SaveState and reloaded by GetState.
type State struct {
	// InitPid is the init process id in the parent namespace
	InitPid int `json:"init_pid,omitempty"`

	// InitStartTime is the init process start time
	// (the raw clock-tick string from /proc/<pid>/stat)
	InitStartTime string `json:"init_start_time,omitempty"`

	// Network runtime state.
	NetworkState network.NetworkState `json:"network_state,omitempty"`

	// Path to all the cgroups setup for a container. Key is cgroup subsystem name.
	CgroupPaths map[string]string `json:"cgroup_paths,omitempty"`
}
++++
// The running state of the container.
type RunState int

const (
	// The name of the runtime state file written beneath the container's
	// base path by SaveState.
	stateFile = "state.json"

	// The container exists and is running.
	Running RunState = iota

	// The container exists, it is in the process of being paused.
	Pausing

	// The container exists, but all its processes are paused.
	Paused

	// The container does not exist.
	Destroyed
)
++++
++++// SaveState writes the container's runtime state to a state.json file
++++// in the specified path
++++func SaveState(basePath string, state *State) error {
++++ f, err := os.Create(filepath.Join(basePath, stateFile))
++++ if err != nil {
++++ return err
++++ }
++++ defer f.Close()
++++
++++ return json.NewEncoder(f).Encode(state)
++++}
++++
++++// GetState reads the state.json file for a running container
++++func GetState(basePath string) (*State, error) {
++++ f, err := os.Open(filepath.Join(basePath, stateFile))
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++
++++ var state *State
++++ if err := json.NewDecoder(f).Decode(&state); err != nil {
++++ return nil, err
++++ }
++++
++++ return state, nil
++++}
++++
++++// DeleteState deletes the state.json file
++++func DeleteState(basePath string) error {
++++ return os.Remove(filepath.Join(basePath, stateFile))
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package syncpipe
++++
++++import (
++++ "encoding/json"
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "syscall"
++++)
++++
// SyncPipe allows communication between a child process and its parent so
// the two independent processes can synchronize their state.
type SyncPipe struct {
	// parent and child are the two ends of the socket pair; depending on
	// which process we are in, only one of them may be populated.
	parent, child *os.File
}
++++
++++func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) {
++++ s := &SyncPipe{}
++++
++++ if parentFd > 0 {
++++ s.parent = os.NewFile(parentFd, "parentPipe")
++++ } else if childFd > 0 {
++++ s.child = os.NewFile(childFd, "childPipe")
++++ } else {
++++ return nil, fmt.Errorf("no valid sync pipe fd specified")
++++ }
++++
++++ return s, nil
++++}
++++
// Child returns the child-side end of the pipe (nil if this SyncPipe only
// wraps the parent end).
func (s *SyncPipe) Child() *os.File {
	return s.child
}
++++
// Parent returns the parent-side end of the pipe (nil if this SyncPipe
// only wraps the child end).
func (s *SyncPipe) Parent() *os.File {
	return s.parent
}
++++
++++func (s *SyncPipe) SendToChild(v interface{}) error {
++++ data, err := json.Marshal(v)
++++ if err != nil {
++++ return err
++++ }
++++
++++ s.parent.Write(data)
++++
++++ return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR)
++++}
++++
++++func (s *SyncPipe) ReadFromChild() error {
++++ data, err := ioutil.ReadAll(s.parent)
++++ if err != nil {
++++ return err
++++ }
++++
++++ if len(data) > 0 {
++++ return fmt.Errorf("%s", data)
++++ }
++++
++++ return nil
++++}
++++
++++func (s *SyncPipe) ReadFromParent(v interface{}) error {
++++ data, err := ioutil.ReadAll(s.child)
++++ if err != nil {
++++ return fmt.Errorf("error reading from sync pipe %s", err)
++++ }
++++
++++ if len(data) > 0 {
++++ if err := json.Unmarshal(data, v); err != nil {
++++ return err
++++ }
++++ }
++++
++++ return nil
++++}
++++
++++func (s *SyncPipe) ReportChildError(err error) {
++++ // ensure that any data sent from the parent is consumed so it doesn't
++++ // receive ECONNRESET when the child writes to the pipe.
++++ ioutil.ReadAll(s.child)
++++
++++ s.child.Write([]byte(err.Error()))
++++ s.CloseChild()
++++}
++++
++++func (s *SyncPipe) Close() error {
++++ if s.parent != nil {
++++ s.parent.Close()
++++ }
++++
++++ if s.child != nil {
++++ s.child.Close()
++++ }
++++
++++ return nil
++++}
++++
++++func (s *SyncPipe) CloseChild() {
++++ if s.child != nil {
++++ s.child.Close()
++++ s.child = nil
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package syncpipe
++++
++++import (
++++ "os"
++++ "syscall"
++++)
++++
++++func NewSyncPipe() (s *SyncPipe, err error) {
++++ s = &SyncPipe{}
++++
++++ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ s.child = os.NewFile(uintptr(fds[0]), "child syncpipe")
++++ s.parent = os.NewFile(uintptr(fds[1]), "parent syncpipe")
++++
++++ return s, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package syncpipe
++++
++++import (
++++ "fmt"
++++ "syscall"
++++ "testing"
++++)
++++
// testStruct is a minimal JSON-serializable payload used by the pipe
// round-trip tests.
type testStruct struct {
	Name string
}
++++
++++func TestSendErrorFromChild(t *testing.T) {
++++ pipe, err := NewSyncPipe()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ defer func() {
++++ if err := pipe.Close(); err != nil {
++++ t.Fatal(err)
++++ }
++++ }()
++++
++++ childfd, err := syscall.Dup(int(pipe.Child().Fd()))
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ childPipe, _ := NewSyncPipeFromFd(0, uintptr(childfd))
++++
++++ pipe.CloseChild()
++++ pipe.SendToChild(nil)
++++
++++ expected := "something bad happened"
++++ childPipe.ReportChildError(fmt.Errorf(expected))
++++
++++ childError := pipe.ReadFromChild()
++++ if childError == nil {
++++ t.Fatal("expected an error to be returned but did not receive anything")
++++ }
++++
++++ if childError.Error() != expected {
++++ t.Fatalf("expected %q but received error message %q", expected, childError.Error())
++++ }
++++}
++++
++++func TestSendPayloadToChild(t *testing.T) {
++++ pipe, err := NewSyncPipe()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ defer func() {
++++ if err := pipe.Close(); err != nil {
++++ t.Fatal(err)
++++ }
++++ }()
++++
++++ expected := "libcontainer"
++++
++++ if err := pipe.SendToChild(testStruct{Name: expected}); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ var s *testStruct
++++ if err := pipe.ReadFromParent(&s); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if s.Name != expected {
++++ t.Fatalf("expected name %q but received %q", expected, s.Name)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package system
++++
++++import (
++++ "os/exec"
++++ "syscall"
++++ "unsafe"
++++)
++++
// Execv resolves cmd on PATH and replaces the current process image with
// it via execve(2). On success it never returns; on failure the lookup or
// exec error is returned.
func Execv(cmd string, args []string, env []string) error {
	path, err := exec.LookPath(cmd)
	if err != nil {
		return err
	}
	return syscall.Exec(path, args, env)
}
++++
// ParentDeathSignal arranges for the kernel to deliver sig to this
// process when its parent dies (prctl PR_SET_PDEATHSIG).
func ParentDeathSignal(sig uintptr) error {
	_, _, errno := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0)
	if errno != 0 {
		return errno
	}
	return nil
}
++++
// GetParentDeathSignal returns the signal number currently registered via
// PR_SET_PDEATHSIG, or -1 with an error when the prctl call fails.
func GetParentDeathSignal() (int, error) {
	var sig int
	if _, _, errno := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0); errno != 0 {
		return -1, errno
	}
	return sig, nil
}
++++
// SetKeepCaps asks the kernel to retain this process's permitted
// capabilities across a uid change (prctl PR_SET_KEEPCAPS, 1).
func SetKeepCaps() error {
	_, _, errno := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0)
	if errno != 0 {
		return errno
	}
	return nil
}
++++
// ClearKeepCaps restores the default behavior of dropping capabilities on
// uid change (prctl PR_SET_KEEPCAPS, 0).
func ClearKeepCaps() error {
	_, _, errno := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}
++++
// Setctty makes the terminal on stdin (fd 0) the controlling terminal of
// the calling process (ioctl TIOCSCTTY).
func Setctty() error {
	_, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0)
	if errno != 0 {
		return errno
	}
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package system
++++
import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)
++++
++++// look in /proc to find the process start time so that we can verify
++++// that this pid has started after ourself
++++func GetProcessStartTime(pid int) (string, error) {
++++ data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
++++ if err != nil {
++++ return "", err
++++ }
++++
++++ parts := strings.Split(string(data), " ")
++++ // the starttime is located at pos 22
++++ // from the man page
++++ //
++++ // starttime %llu (was %lu before Linux 2.6)
++++ // (22) The time the process started after system boot. In kernels before Linux 2.6, this
++++ // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
++++ // (divide by sysconf(_SC_CLK_TCK)).
++++ return parts[22-1], nil // starts at 1
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package system
++++
++++import (
++++ "fmt"
++++ "runtime"
++++ "syscall"
++++)
++++
// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
//
// We need different setns values for the different platforms and arch.
// We declare the table here because the SETNS syscall does not exist in
// the stdlib.
var setNsMap = map[string]uintptr{
	"linux/386":   346,
	"linux/amd64": 308,
	// 374 is __NR_sendmmsg on ARM EABI; setns is 375
	// (arch/arm/include/uapi/asm/unistd.h), so the previous value invoked
	// the wrong syscall entirely.
	"linux/arm": 375,
}
++++
++++func Setns(fd uintptr, flags uintptr) error {
++++ ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
++++ if !exists {
++++ return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
++++ }
++++
++++ _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
++++ if err != 0 {
++++ return err
++++ }
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build cgo
++++
++++package system
++++
++++/*
++++#include <unistd.h>
++++*/
++++import "C"
++++
// GetClockTicks returns the system clock tick rate reported by
// sysconf(_SC_CLK_TCK), used to convert /proc time fields (expressed in
// clock ticks) to seconds.
func GetClockTicks() int {
	return int(C.sysconf(C._SC_CLK_TCK))
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build !cgo
++++
++++package system
++++
// GetClockTicks returns a fixed clock tick rate of 100 on builds where
// cgo (and thus sysconf) is unavailable; as the TODO notes, this is an
// assumption rather than a queried value.
func GetClockTicks() int {
	// TODO figure out a better alternative for platforms where we're missing cgo
	return 100
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package system
++++
++++import (
++++ "syscall"
++++ "unsafe"
++++)
++++
// _zero provides a stable non-nil address to pass to the kernel when a
// zero-length buffer is supplied to a syscall.
var _zero uintptr
++++
++++// Returns the size of xattrs and nil error
++++// Requires path, takes allocated []byte or nil as last argument
++++func Llistxattr(path string, dest []byte) (size int, err error) {
++++ pathBytes, err := syscall.BytePtrFromString(path)
++++ if err != nil {
++++ return -1, err
++++ }
++++ var newpathBytes unsafe.Pointer
++++ if len(dest) > 0 {
++++ newpathBytes = unsafe.Pointer(&dest[0])
++++ } else {
++++ newpathBytes = unsafe.Pointer(&_zero)
++++ }
++++
++++ _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0)
++++ size = int(_size)
++++ if errno != 0 {
++++ return -1, errno
++++ }
++++
++++ return size, nil
++++}
++++
++++// Returns a []byte slice if the xattr is set and nil otherwise
++++// Requires path and its attribute as arguments
++++func Lgetxattr(path string, attr string) ([]byte, error) {
++++ var sz int
++++ pathBytes, err := syscall.BytePtrFromString(path)
++++ if err != nil {
++++ return nil, err
++++ }
++++ attrBytes, err := syscall.BytePtrFromString(attr)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ // Start with a 128 length byte array
++++ sz = 128
++++ dest := make([]byte, sz)
++++ destBytes := unsafe.Pointer(&dest[0])
++++ _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
++++
++++ switch {
++++ case errno == syscall.ENODATA:
++++ return nil, errno
++++ case errno == syscall.ENOTSUP:
++++ return nil, errno
++++ case errno == syscall.ERANGE:
++++ // 128 byte array might just not be good enough,
++++ // A dummy buffer is used ``uintptr(0)`` to get real size
++++ // of the xattrs on disk
++++ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
++++ sz = int(_sz)
++++ if sz < 0 {
++++ return nil, errno
++++ }
++++ dest = make([]byte, sz)
++++ destBytes := unsafe.Pointer(&dest[0])
++++ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
++++ if errno != 0 {
++++ return nil, errno
++++ }
++++ case errno != 0:
++++ return nil, errno
++++ }
++++ sz = int(_sz)
++++ return dest[:sz], nil
++++}
++++
++++func Lsetxattr(path string, attr string, data []byte, flags int) error {
++++ pathBytes, err := syscall.BytePtrFromString(path)
++++ if err != nil {
++++ return err
++++ }
++++ attrBytes, err := syscall.BytePtrFromString(attr)
++++ if err != nil {
++++ return err
++++ }
++++ var dataBytes unsafe.Pointer
++++ if len(data) > 0 {
++++ dataBytes = unsafe.Pointer(&data[0])
++++ } else {
++++ dataBytes = unsafe.Pointer(&_zero)
++++ }
++++ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
++++ if errno != 0 {
++++ return errno
++++ }
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libcontainer
++++
++++import (
++++ "github.com/docker/libcontainer/cgroups"
++++ "github.com/docker/libcontainer/network"
++++)
++++
// ContainerStats bundles one container's runtime statistics: network
// counters and cgroup accounting. Both fields are pointers and may be
// nil; json omitempty drops unset fields when serialized.
type ContainerStats struct {
	NetworkStats *network.NetworkStats `json:"network_stats,omitempty"`
	CgroupStats  *cgroups.Stats        `json:"cgroup_stats,omitempty"`
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
#!/usr/bin/env bash
set -e

# Run from the directory containing this script so relative paths resolve.
cd "$(dirname "$BASH_SOURCE")"

# Downloads dependencies into vendor/ directory
mkdir -p vendor
cd vendor
++++
# clone <vcs> <pkg> <rev>
# Fetch the given package at the given revision into src/<pkg>, replacing
# any previous checkout and stripping VCS metadata so only sources remain.
clone() {
	vcs=$1
	pkg=$2
	rev=$3

	pkg_url=https://$pkg
	target_dir=src/$pkg

	echo -n "$pkg @ $rev: "

	# Quote every path/URL expansion: unquoted $target_dir etc. would be
	# word-split or glob-expanded if a package path ever contained spaces
	# or shell metacharacters.
	if [ -d "$target_dir" ]; then
		echo -n 'rm old, '
		rm -fr "$target_dir"
	fi

	echo -n 'clone, '
	case $vcs in
		git)
			git clone --quiet --no-checkout "$pkg_url" "$target_dir"
			( cd "$target_dir" && git reset --quiet --hard "$rev" )
			;;
		hg)
			hg clone --quiet --updaterev "$rev" "$pkg_url" "$target_dir"
			;;
	esac

	echo -n 'rm VCS, '
	( cd "$target_dir" && rm -rf .{git,hg} )

	echo done
}
++++
# the following lines are in sorted order, FYI
# (each revision is pinned — tag, branch, or commit — for reproducible vendoring)
clone git github.com/codegangsta/cli 1.1.0
clone git github.com/coreos/go-systemd v2
clone git github.com/godbus/dbus v1
clone git github.com/syndtr/gocapability 3c85049eae

# intentionally not vendoring Docker itself... that'd be a circle :)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Tianon Gravi <admwiggin@gmail.com> (@tianon)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package user
++++
++++import (
++++ "bufio"
++++ "fmt"
++++ "io"
++++ "os"
++++ "strconv"
++++ "strings"
++++)
++++
const (
	// minId and maxId bound the acceptable uid/gid values.
	minId = 0
	maxId = 1<<31 - 1 //for 32-bit systems compatibility
)

var (
	// ErrRange is returned when a numeric uid or gid falls outside [minId, maxId].
	ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId)
)
++++
// User mirrors one passwd(5) record:
// name:password:UID:GID:GECOS:directory:shell.
type User struct {
	Name  string
	Pass  string
	Uid   int
	Gid   int
	Gecos string
	Home  string
	Shell string
}
++++
// Group mirrors one group(5) record:
// group_name:password:GID:user_list (List holds the member names).
type Group struct {
	Name string
	Pass string
	Gid  int
	List []string
}
++++
// parseLine splits a colon-separated record and stores each field into
// the corresponding pointer in v (*string, *int, or *[]string for
// comma-separated lists). Extra fields and numeric conversion errors are
// tolerated to cope with naughty configuration files; an empty line
// leaves every destination untouched. Non-pointer destinations panic.
func parseLine(line string, v ...interface{}) {
	if line == "" {
		return
	}

	for i, field := range strings.Split(line, ":") {
		if i >= len(v) {
			// More fields than destinations: tolerate and stop.
			break
		}

		switch dst := v[i].(type) {
		case *string:
			// e.g. "root", "adm", "/bin/bash"
			*dst = field
		case *int:
			// e.g. "0", "4", "1000" — conversion errors intentionally ignored
			*dst, _ = strconv.Atoi(field)
		case *[]string:
			// e.g. "", "root", "root,adm,daemon"
			if field == "" {
				*dst = []string{}
			} else {
				*dst = strings.Split(field, ",")
			}
		default:
			// A non-pointer destination is a programming/logic error, not a runtime one.
			panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!")
		}
	}
}
++++
// ParsePasswd returns every entry in /etc/passwd (no filtering).
func ParsePasswd() ([]*User, error) {
	return ParsePasswdFilter(nil)
}
++++
++++func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) {
++++ f, err := os.Open("/etc/passwd")
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++ return parsePasswdFile(f, filter)
++++}
++++
++++func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) {
++++ var (
++++ s = bufio.NewScanner(r)
++++ out = []*User{}
++++ )
++++
++++ for s.Scan() {
++++ if err := s.Err(); err != nil {
++++ return nil, err
++++ }
++++
++++ text := strings.TrimSpace(s.Text())
++++ if text == "" {
++++ continue
++++ }
++++
++++ // see: man 5 passwd
++++ // name:password:UID:GID:GECOS:directory:shell
++++ // Name:Pass:Uid:Gid:Gecos:Home:Shell
++++ // root:x:0:0:root:/root:/bin/bash
++++ // adm:x:3:4:adm:/var/adm:/bin/false
++++ p := &User{}
++++ parseLine(
++++ text,
++++ &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell,
++++ )
++++
++++ if filter == nil || filter(p) {
++++ out = append(out, p)
++++ }
++++ }
++++
++++ return out, nil
++++}
++++
// ParseGroup returns every entry in /etc/group (no filtering).
func ParseGroup() ([]*Group, error) {
	return ParseGroupFilter(nil)
}
++++
++++func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) {
++++ f, err := os.Open("/etc/group")
++++ if err != nil {
++++ return nil, err
++++ }
++++ defer f.Close()
++++ return parseGroupFile(f, filter)
++++}
++++
++++func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) {
++++ var (
++++ s = bufio.NewScanner(r)
++++ out = []*Group{}
++++ )
++++
++++ for s.Scan() {
++++ if err := s.Err(); err != nil {
++++ return nil, err
++++ }
++++
++++ text := s.Text()
++++ if text == "" {
++++ continue
++++ }
++++
++++ // see: man 5 group
++++ // group_name:password:GID:user_list
++++ // Name:Pass:Gid:List
++++ // root:x:0:root
++++ // adm:x:4:root,adm,daemon
++++ p := &Group{}
++++ parseLine(
++++ text,
++++ &p.Name, &p.Pass, &p.Gid, &p.List,
++++ )
++++
++++ if filter == nil || filter(p) {
++++ out = append(out, p)
++++ }
++++ }
++++
++++ return out, nil
++++}
++++
// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, list of supplementary group IDs, and home directory, if available and/or applicable.
// Lookup is lenient: a missing /etc/passwd or /etc/group is tolerated, and
// purely numeric user/group args that match no database entry are accepted
// as-is when they fall within [minId, maxId].
func GetUserGroupSupplementaryHome(userSpec string, defaultUid, defaultGid int, defaultHome string) (int, int, []int, string, error) {
	var (
		uid      = defaultUid
		gid      = defaultGid
		suppGids = []int{}
		home     = defaultHome

		userArg, groupArg string
	)

	// allow for userArg to have either "user" syntax, or optionally "user:group" syntax
	parseLine(userSpec, &userArg, &groupArg)

	// Match by name or numeric uid string; with no userArg, match the default uid.
	users, err := ParsePasswdFilter(func(u *User) bool {
		if userArg == "" {
			return u.Uid == uid
		}
		return u.Name == userArg || strconv.Itoa(u.Uid) == userArg
	})
	if err != nil && !os.IsNotExist(err) {
		if userArg == "" {
			userArg = strconv.Itoa(uid)
		}
		return 0, 0, nil, "", fmt.Errorf("Unable to find user %v: %v", userArg, err)
	}

	haveUser := users != nil && len(users) > 0
	if haveUser {
		// if we found any user entries that matched our filter, let's take the first one as "correct"
		uid = users[0].Uid
		gid = users[0].Gid
		home = users[0].Home
	} else if userArg != "" {
		// we asked for a user but didn't find them... let's check to see if we wanted a numeric user
		uid, err = strconv.Atoi(userArg)
		if err != nil {
			// not numeric - we have to bail
			return 0, 0, nil, "", fmt.Errorf("Unable to find user %v", userArg)
		}
		if uid < minId || uid > maxId {
			return 0, 0, nil, "", ErrRange
		}

		// if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
	}

	if groupArg != "" || (haveUser && users[0].Name != "") {
		// With an explicit groupArg, match by name or numeric gid string;
		// otherwise collect the found user's supplementary groups.
		groups, err := ParseGroupFilter(func(g *Group) bool {
			if groupArg != "" {
				return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
			}
			for _, u := range g.List {
				if u == users[0].Name {
					return true
				}
			}
			return false
		})
		if err != nil && !os.IsNotExist(err) {
			// NOTE(review): users[0] is indexed here even when this branch
			// was entered purely via groupArg with no matching user entry
			// (haveUser == false), which would panic; confirm and guard.
			return 0, 0, nil, "", fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
		}

		haveGroup := groups != nil && len(groups) > 0
		if groupArg != "" {
			if haveGroup {
				// if we found any group entries that matched our filter, let's take the first one as "correct"
				gid = groups[0].Gid
			} else {
				// we asked for a group but didn't find id... let's check to see if we wanted a numeric group
				gid, err = strconv.Atoi(groupArg)
				if err != nil {
					// not numeric - we have to bail
					return 0, 0, nil, "", fmt.Errorf("Unable to find group %v", groupArg)
				}
				if gid < minId || gid > maxId {
					return 0, 0, nil, "", ErrRange
				}

				// if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
			}
		} else if haveGroup {
			suppGids = make([]int, len(groups))
			for i, group := range groups {
				suppGids[i] = group.Gid
			}
		}
	}

	return uid, gid, suppGids, home, nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package user
++++
++++import (
++++ "strings"
++++ "testing"
++++)
++++
// TestUserParseLine exercises parseLine's field dispatch, leniency toward
// malformed input, and the rule that empty lines leave destinations
// untouched. NOTE: the assertions are order-dependent — several reuse
// a/b/c and rely on values left over from the previous parseLine call —
// so the cases must not be reordered.
func TestUserParseLine(t *testing.T) {
	var (
		a, b string
		c    []string
		d    int
	)

	// Empty input: destinations must stay at their zero values.
	parseLine("", &a, &b)
	if a != "" || b != "" {
		t.Fatalf("a and b should be empty ('%v', '%v')", a, b)
	}

	// Fewer fields than destinations: trailing destinations untouched.
	parseLine("a", &a, &b)
	if a != "a" || b != "" {
		t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b)
	}

	// Fields may contain spaces; only ':' separates.
	parseLine("bad boys:corny cows", &a, &b)
	if a != "bad boys" || b != "corny cows" {
		t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b)
	}

	parseLine("", &c)
	if len(c) != 0 {
		t.Fatalf("c should be empty (%#v)", c)
	}

	// The same destination may appear twice; the last field wins.
	parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c)
	if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" {
		t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c)
	}

	// All-empty fields overwrite previous values with empties.
	parseLine("::::::::::", &a, &b, &c)
	if a != "" || b != "" || len(c) != 0 {
		t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c)
	}

	// Non-numeric input into *int is ignored (stays zero).
	parseLine("not a number", &d)
	if d != 0 {
		t.Fatalf("d should be 0 (%v)", d)
	}

	parseLine("b:12:c", &a, &d, &b)
	if a != "b" || b != "c" || d != 12 {
		t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d)
	}
}
++++
++++func TestUserParsePasswd(t *testing.T) {
++++ users, err := parsePasswdFile(strings.NewReader(`
++++root:x:0:0:root:/root:/bin/bash
++++adm:x:3:4:adm:/var/adm:/bin/false
++++this is just some garbage data
++++`), nil)
++++ if err != nil {
++++ t.Fatalf("Unexpected error: %v", err)
++++ }
++++ if len(users) != 3 {
++++ t.Fatalf("Expected 3 users, got %v", len(users))
++++ }
++++ if users[0].Uid != 0 || users[0].Name != "root" {
++++ t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name)
++++ }
++++ if users[1].Uid != 3 || users[1].Name != "adm" {
++++ t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name)
++++ }
++++}
++++
++++func TestUserParseGroup(t *testing.T) {
++++ groups, err := parseGroupFile(strings.NewReader(`
++++root:x:0:root
++++adm:x:4:root,adm,daemon
++++this is just some garbage data
++++`), nil)
++++ if err != nil {
++++ t.Fatalf("Unexpected error: %v", err)
++++ }
++++ if len(groups) != 3 {
++++ t.Fatalf("Expected 3 groups, got %v", len(groups))
++++ }
++++ if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 {
++++ t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List))
++++ }
++++ if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 {
++++ t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List))
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package utils
++++
++++import (
++++ "crypto/rand"
++++ "encoding/hex"
++++ "io"
++++ "io/ioutil"
++++ "path/filepath"
++++ "strconv"
++++ "syscall"
++++)
++++
++++// GenerateRandomName returns a new name joined with a prefix. This size
++++// specified is used to truncate the randomly generated value
++++func GenerateRandomName(prefix string, size int) (string, error) {
++++ id := make([]byte, 32)
++++ if _, err := io.ReadFull(rand.Reader, id); err != nil {
++++ return "", err
++++ }
++++ return prefix + hex.EncodeToString(id)[:size], nil
++++}
++++
// ResolveRootfs converts uncleanRootfs to an absolute path and resolves
// any symlinks in it, returning the canonical rootfs location.
func ResolveRootfs(uncleanRootfs string) (string, error) {
	abs, err := filepath.Abs(uncleanRootfs)
	if err != nil {
		return "", err
	}
	return filepath.EvalSymlinks(abs)
}
++++
// CloseExecFrom marks every open file descriptor numbered >= minFd as
// close-on-exec so it will not leak into processes we exec. Per-fd
// errors from syscall.CloseOnExec are intentionally ignored: some
// descriptors are already closed by the time we reach them — including
// and especially the one opened when ioutil.ReadDir did the "opendir"
// syscall on /proc/self/fd.
func CloseExecFrom(minFd int) error {
	entries, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fd, convErr := strconv.Atoi(entry.Name())
		if convErr != nil || fd < minFd {
			// Skip non-numeric names and descriptors below the minimum.
			continue
		}
		syscall.CloseOnExec(fd)
	}
	return nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package xattr
++++
++++import (
++++ "syscall"
++++
++++ "github.com/docker/libcontainer/system"
++++)
++++
++++func XattrEnabled(path string) bool {
++++ if Setxattr(path, "user.test", "") == syscall.ENOTSUP {
++++ return false
++++ }
++++ return true
++++}
++++
// stringsfromByte splits a NUL-delimited byte buffer (as returned by
// listxattr) into its component strings. Any trailing bytes after the
// last NUL are discarded, matching the kernel's terminator convention.
func stringsfromByte(buf []byte) (result []string) {
	start := 0
	for i := 0; i < len(buf); i++ {
		if buf[i] == 0 {
			result = append(result, string(buf[start:i]))
			start = i + 1
		}
	}
	return
}
++++
++++func Listxattr(path string) ([]string, error) {
++++ size, err := system.Llistxattr(path, nil)
++++ if err != nil {
++++ return nil, err
++++ }
++++ buf := make([]byte, size)
++++ read, err := system.Llistxattr(path, buf)
++++ if err != nil {
++++ return nil, err
++++ }
++++ names := stringsfromByte(buf[:read])
++++ return names, nil
++++}
++++
++++func Getxattr(path, attr string) (string, error) {
++++ value, err := system.Lgetxattr(path, attr)
++++ if err != nil {
++++ return "", err
++++ }
++++ return string(value), nil
++++}
++++
// Setxattr sets the extended attribute named xattr on path to value,
// without following a final symlink (delegates to lsetxattr with no
// flags, so the attribute is created or replaced as needed).
func Setxattr(path, xattr, value string) error {
	return system.Lsetxattr(path, xattr, []byte(value), 0)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++// +build linux
++++
++++package xattr_test
++++
++++import (
++++ "os"
++++ "testing"
++++
++++ "github.com/docker/libcontainer/xattr"
++++)
++++
++++func testXattr(t *testing.T) {
++++ tmp := "xattr_test"
++++ out, err := os.OpenFile(tmp, os.O_WRONLY, 0)
++++ if err != nil {
++++ t.Fatal("failed")
++++ }
++++ attr := "user.test"
++++ out.Close()
++++
++++ if !xattr.XattrEnabled(tmp) {
++++ t.Log("Disabled")
++++ t.Fatal("failed")
++++ }
++++ t.Log("Success")
++++
++++ err = xattr.Setxattr(tmp, attr, "test")
++++ if err != nil {
++++ t.Fatal("failed")
++++ }
++++
++++ var value string
++++ value, err = xattr.Getxattr(tmp, attr)
++++ if err != nil {
++++ t.Fatal("failed")
++++ }
++++ if value != "test" {
++++ t.Fatal("failed")
++++ }
++++ t.Log("Success")
++++
++++ var names []string
++++ names, err = xattr.Listxattr(tmp)
++++ if err != nil {
++++ t.Fatal("failed")
++++ }
++++
++++ var found int
++++ for _, name := range names {
++++ if name == attr {
++++ found = 1
++++ }
++++ }
++++ // Listxattr doesn't return trusted.* and system.* namespace
++++ // attrs when run in unprevileged mode.
++++ if found != 1 {
++++ t.Fatal("failed")
++++ }
++++ t.Log("Success")
++++
++++ big := "0000000000000000000000000000000000000000000000000000000000000000000008c6419ad822dfe29283fb3ac98dcc5908810cb31f4cfe690040c42c144b7492eicompslf20dxmlpgz"
++++ // Test for long xattrs larger than 128 bytes
++++ err = xattr.Setxattr(tmp, attr, big)
++++ if err != nil {
++++ t.Fatal("failed to add long value")
++++ }
++++ value, err = xattr.Getxattr(tmp, attr)
++++ if err != nil {
++++ t.Fatal("failed to get long value")
++++ }
++++ t.Log("Success")
++++
++++ if value != big {
++++ t.Fatal("failed, value doesn't match")
++++ }
++++ t.Log("Success")
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# Contributing to libtrust
++++
++++Want to hack on libtrust? Awesome! Here are instructions to get you
++++started.
++++
++++libtrust is a part of the [Docker](https://www.docker.com) project, and follows
++++the same rules and principles. If you're already familiar with the way
++++Docker does things, you'll feel right at home.
++++
++++Otherwise, go read
++++[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
++++
++++Happy hacking!
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++
++++ Apache License
++++ Version 2.0, January 2004
++++ http://www.apache.org/licenses/
++++
++++ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++++
++++ 1. Definitions.
++++
++++ "License" shall mean the terms and conditions for use, reproduction,
++++ and distribution as defined by Sections 1 through 9 of this document.
++++
++++ "Licensor" shall mean the copyright owner or entity authorized by
++++ the copyright owner that is granting the License.
++++
++++ "Legal Entity" shall mean the union of the acting entity and all
++++ other entities that control, are controlled by, or are under common
++++ control with that entity. For the purposes of this definition,
++++ "control" means (i) the power, direct or indirect, to cause the
++++ direction or management of such entity, whether by contract or
++++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++++ outstanding shares, or (iii) beneficial ownership of such entity.
++++
++++ "You" (or "Your") shall mean an individual or Legal Entity
++++ exercising permissions granted by this License.
++++
++++ "Source" form shall mean the preferred form for making modifications,
++++ including but not limited to software source code, documentation
++++ source, and configuration files.
++++
++++ "Object" form shall mean any form resulting from mechanical
++++ transformation or translation of a Source form, including but
++++ not limited to compiled object code, generated documentation,
++++ and conversions to other media types.
++++
++++ "Work" shall mean the work of authorship, whether in Source or
++++ Object form, made available under the License, as indicated by a
++++ copyright notice that is included in or attached to the work
++++ (an example is provided in the Appendix below).
++++
++++ "Derivative Works" shall mean any work, whether in Source or Object
++++ form, that is based on (or derived from) the Work and for which the
++++ editorial revisions, annotations, elaborations, or other modifications
++++ represent, as a whole, an original work of authorship. For the purposes
++++ of this License, Derivative Works shall not include works that remain
++++ separable from, or merely link (or bind by name) to the interfaces of,
++++ the Work and Derivative Works thereof.
++++
++++ "Contribution" shall mean any work of authorship, including
++++ the original version of the Work and any modifications or additions
++++ to that Work or Derivative Works thereof, that is intentionally
++++ submitted to Licensor for inclusion in the Work by the copyright owner
++++ or by an individual or Legal Entity authorized to submit on behalf of
++++ the copyright owner. For the purposes of this definition, "submitted"
++++ means any form of electronic, verbal, or written communication sent
++++ to the Licensor or its representatives, including but not limited to
++++ communication on electronic mailing lists, source code control systems,
++++ and issue tracking systems that are managed by, or on behalf of, the
++++ Licensor for the purpose of discussing and improving the Work, but
++++ excluding communication that is conspicuously marked or otherwise
++++ designated in writing by the copyright owner as "Not a Contribution."
++++
++++ "Contributor" shall mean Licensor and any individual or Legal Entity
++++ on behalf of whom a Contribution has been received by Licensor and
++++ subsequently incorporated within the Work.
++++
++++ 2. Grant of Copyright License. Subject to the terms and conditions of
++++ this License, each Contributor hereby grants to You a perpetual,
++++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++++ copyright license to reproduce, prepare Derivative Works of,
++++ publicly display, publicly perform, sublicense, and distribute the
++++ Work and such Derivative Works in Source or Object form.
++++
++++ 3. Grant of Patent License. Subject to the terms and conditions of
++++ this License, each Contributor hereby grants to You a perpetual,
++++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++++ (except as stated in this section) patent license to make, have made,
++++ use, offer to sell, sell, import, and otherwise transfer the Work,
++++ where such license applies only to those patent claims licensable
++++ by such Contributor that are necessarily infringed by their
++++ Contribution(s) alone or by combination of their Contribution(s)
++++ with the Work to which such Contribution(s) was submitted. If You
++++ institute patent litigation against any entity (including a
++++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++++ or a Contribution incorporated within the Work constitutes direct
++++ or contributory patent infringement, then any patent licenses
++++ granted to You under this License for that Work shall terminate
++++ as of the date such litigation is filed.
++++
++++ 4. Redistribution. You may reproduce and distribute copies of the
++++ Work or Derivative Works thereof in any medium, with or without
++++ modifications, and in Source or Object form, provided that You
++++ meet the following conditions:
++++
++++ (a) You must give any other recipients of the Work or
++++ Derivative Works a copy of this License; and
++++
++++ (b) You must cause any modified files to carry prominent notices
++++ stating that You changed the files; and
++++
++++ (c) You must retain, in the Source form of any Derivative Works
++++ that You distribute, all copyright, patent, trademark, and
++++ attribution notices from the Source form of the Work,
++++ excluding those notices that do not pertain to any part of
++++ the Derivative Works; and
++++
++++ (d) If the Work includes a "NOTICE" text file as part of its
++++ distribution, then any Derivative Works that You distribute must
++++ include a readable copy of the attribution notices contained
++++ within such NOTICE file, excluding those notices that do not
++++ pertain to any part of the Derivative Works, in at least one
++++ of the following places: within a NOTICE text file distributed
++++ as part of the Derivative Works; within the Source form or
++++ documentation, if provided along with the Derivative Works; or,
++++ within a display generated by the Derivative Works, if and
++++ wherever such third-party notices normally appear. The contents
++++ of the NOTICE file are for informational purposes only and
++++ do not modify the License. You may add Your own attribution
++++ notices within Derivative Works that You distribute, alongside
++++ or as an addendum to the NOTICE text from the Work, provided
++++ that such additional attribution notices cannot be construed
++++ as modifying the License.
++++
++++ You may add Your own copyright statement to Your modifications and
++++ may provide additional or different license terms and conditions
++++ for use, reproduction, or distribution of Your modifications, or
++++ for any such Derivative Works as a whole, provided Your use,
++++ reproduction, and distribution of the Work otherwise complies with
++++ the conditions stated in this License.
++++
++++ 5. Submission of Contributions. Unless You explicitly state otherwise,
++++ any Contribution intentionally submitted for inclusion in the Work
++++ by You to the Licensor shall be under the terms and conditions of
++++ this License, without any additional terms or conditions.
++++ Notwithstanding the above, nothing herein shall supersede or modify
++++ the terms of any separate license agreement you may have executed
++++ with Licensor regarding such Contributions.
++++
++++ 6. Trademarks. This License does not grant permission to use the trade
++++ names, trademarks, service marks, or product names of the Licensor,
++++ except as required for reasonable and customary use in describing the
++++ origin of the Work and reproducing the content of the NOTICE file.
++++
++++ 7. Disclaimer of Warranty. Unless required by applicable law or
++++ agreed to in writing, Licensor provides the Work (and each
++++ Contributor provides its Contributions) on an "AS IS" BASIS,
++++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++++ implied, including, without limitation, any warranties or conditions
++++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++++ PARTICULAR PURPOSE. You are solely responsible for determining the
++++ appropriateness of using or redistributing the Work and assume any
++++ risks associated with Your exercise of permissions under this License.
++++
++++ 8. Limitation of Liability. In no event and under no legal theory,
++++ whether in tort (including negligence), contract, or otherwise,
++++ unless required by applicable law (such as deliberate and grossly
++++ negligent acts) or agreed to in writing, shall any Contributor be
++++ liable to You for damages, including any direct, indirect, special,
++++ incidental, or consequential damages of any character arising as a
++++ result of this License or out of the use or inability to use the
++++ Work (including but not limited to damages for loss of goodwill,
++++ work stoppage, computer failure or malfunction, or any and all
++++ other commercial damages or losses), even if such Contributor
++++ has been advised of the possibility of such damages.
++++
++++ 9. Accepting Warranty or Additional Liability. While redistributing
++++ the Work or Derivative Works thereof, You may choose to offer,
++++ and charge a fee for, acceptance of support, warranty, indemnity,
++++ or other liability obligations and/or rights consistent with this
++++ License. However, in accepting such obligations, You may act only
++++ on Your own behalf and on Your sole responsibility, not on behalf
++++ of any other Contributor, and only if You agree to indemnify,
++++ defend, and hold each Contributor harmless for any liability
++++ incurred by, or claims asserted against, such Contributor by reason
++++ of your accepting any such warranty or additional liability.
++++
++++ END OF TERMS AND CONDITIONS
++++
++++ Copyright 2014 Docker, Inc.
++++
++++ Licensed under the Apache License, Version 2.0 (the "License");
++++ you may not use this file except in compliance with the License.
++++ You may obtain a copy of the License at
++++
++++ http://www.apache.org/licenses/LICENSE-2.0
++++
++++ Unless required by applicable law or agreed to in writing, software
++++ distributed under the License is distributed on an "AS IS" BASIS,
++++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++++ See the License for the specific language governing permissions and
++++ limitations under the License.
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++Solomon Hykes <solomon@docker.com>
++++Josh Hawn <josh@docker.com> (github: jlhawn)
++++Derek McGowan <derek@docker.com> (github: dmcgowan)
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++# libtrust
++++
Libtrust is a library for managing authentication and authorization using public key cryptography.
++++
++++Authentication is handled using the identity attached to the public key.
++++Libtrust provides multiple methods to prove possession of the private key associated with an identity.
++++ - TLS x509 certificates
++++ - Signature verification
++++ - Key Challenge
++++
++++Authorization and access control is managed through a distributed trust graph.
++++Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
++++
++++## Copyright and license
++++
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
Docs released under Creative Commons.
++++
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "crypto/rand"
++++ "crypto/x509"
++++ "crypto/x509/pkix"
++++ "encoding/pem"
++++ "fmt"
++++ "io/ioutil"
++++ "math/big"
++++ "net"
++++ "time"
++++)
++++
// certTemplateInfo bundles the parameters from which generateCertTemplate
// builds an x509 certificate template.
type certTemplateInfo struct {
	commonName  string   // subject common name (typically a key ID)
	domains     []string // DNS subject alternative names
	ipAddresses []net.IP // IP subject alternative names
	isCA        bool     // mark the certificate as a certificate authority
	clientAuth  bool     // permit use for TLS client authentication
	serverAuth  bool     // permit use for TLS server authentication
}
++++
++++func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
++++ // Generate a certificate template which is valid from the past week to
++++ // 10 years from now. The usage of the certificate depends on the
++++ // specified fields in the given certTempInfo object.
++++ var (
++++ keyUsage x509.KeyUsage
++++ extKeyUsage []x509.ExtKeyUsage
++++ )
++++
++++ if info.isCA {
++++ keyUsage = x509.KeyUsageCertSign
++++ }
++++
++++ if info.clientAuth {
++++ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
++++ }
++++
++++ if info.serverAuth {
++++ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
++++ }
++++
++++ return &x509.Certificate{
++++ SerialNumber: big.NewInt(0),
++++ Subject: pkix.Name{
++++ CommonName: info.commonName,
++++ },
++++ NotBefore: time.Now().Add(-time.Hour * 24 * 7),
++++ NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
++++ DNSNames: info.domains,
++++ IPAddresses: info.ipAddresses,
++++ IsCA: info.isCA,
++++ KeyUsage: keyUsage,
++++ ExtKeyUsage: extKeyUsage,
++++ BasicConstraintsValid: info.isCA,
++++ }
++++}
++++
++++func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
++++ pubCertTemplate := generateCertTemplate(subInfo)
++++ privCertTemplate := generateCertTemplate(issInfo)
++++
++++ certDER, err := x509.CreateCertificate(
++++ rand.Reader, pubCertTemplate, privCertTemplate,
++++ pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
++++ )
++++ if err != nil {
++++ return nil, fmt.Errorf("failed to create certificate: %s", err)
++++ }
++++
++++ cert, err = x509.ParseCertificate(certDER)
++++ if err != nil {
++++ return nil, fmt.Errorf("failed to parse certificate: %s", err)
++++ }
++++
++++ return
++++}
++++
++++// GenerateSelfSignedServerCert creates a self-signed certificate for the
++++// given key which is to be used for TLS servers with the given domains and
++++// IP addresses.
++++func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
++++ info := &certTemplateInfo{
++++ commonName: key.KeyID(),
++++ domains: domains,
++++ ipAddresses: ipAddresses,
++++ serverAuth: true,
++++ }
++++
++++ return generateCert(key.PublicKey(), key, info, info)
++++}
++++
++++// GenerateSelfSignedClientCert creates a self-signed certificate for the
++++// given key which is to be used for TLS clients.
++++func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
++++ info := &certTemplateInfo{
++++ commonName: key.KeyID(),
++++ clientAuth: true,
++++ }
++++
++++ return generateCert(key.PublicKey(), key, info, info)
++++}
++++
++++// GenerateCACert creates a certificate which can be used as a trusted
++++// certificate authority.
++++func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
++++ subjectInfo := &certTemplateInfo{
++++ commonName: trustedKey.KeyID(),
++++ isCA: true,
++++ }
++++ issuerInfo := &certTemplateInfo{
++++ commonName: signer.KeyID(),
++++ }
++++
++++ return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
++++}
++++
++++// GenerateCACertPool creates a certificate authority pool to be used for a
++++// TLS configuration. Any self-signed certificates issued by the specified
++++// trusted keys will be verified during a TLS handshake
++++func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
++++ certPool := x509.NewCertPool()
++++
++++ for _, trustedKey := range trustedKeys {
++++ cert, err := GenerateCACert(signer, trustedKey)
++++ if err != nil {
++++ return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
++++ }
++++
++++ certPool.AddCert(cert)
++++ }
++++
++++ return certPool, nil
++++}
++++
++++// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
++++// containing one or more certificates. The expected pem type is "CERTIFICATE".
++++func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
++++ b, err := ioutil.ReadFile(filename)
++++ if err != nil {
++++ return nil, err
++++ }
++++ certificates := []*x509.Certificate{}
++++ var block *pem.Block
++++ block, b = pem.Decode(b)
++++ for ; block != nil; block, b = pem.Decode(b) {
++++ if block.Type == "CERTIFICATE" {
++++ cert, err := x509.ParseCertificate(block.Bytes)
++++ if err != nil {
++++ return nil, err
++++ }
++++ certificates = append(certificates, cert)
++++ } else {
++++ return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
++++ }
++++ }
++++
++++ return certificates, nil
++++}
++++
++++// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
++++// containing one or more certificates. The expected pem type is "CERTIFICATE".
++++func LoadCertificatePool(filename string) (*x509.CertPool, error) {
++++ certs, err := LoadCertificateBundle(filename)
++++ if err != nil {
++++ return nil, err
++++ }
++++ pool := x509.NewCertPool()
++++ for _, cert := range certs {
++++ pool.AddCert(cert)
++++ }
++++ return pool, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "encoding/pem"
++++ "io/ioutil"
++++ "net"
++++ "os"
++++ "path"
++++ "testing"
++++)
++++
++++func TestGenerateCertificates(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")})
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ _, err = GenerateSelfSignedClientCert(key)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++}
++++
++++func TestGenerateCACertPool(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ caKey1, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ caKey2, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()})
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++}
++++
++++func TestLoadCertificates(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ caKey1, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ caKey2, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ cert1, err := GenerateCACert(caKey1, key)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ cert2, err := GenerateCACert(caKey2, key)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ d, err := ioutil.TempDir("/tmp", "cert-test")
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ caFile := path.Join(d, "ca.pem")
++++ f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw})
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw})
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ f.Close()
++++
++++ certs, err := LoadCertificateBundle(caFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ if len(certs) != 2 {
++++ t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs))
++++ }
++++
++++ pool, err := LoadCertificatePool(caFile)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if len(pool.Subjects()) != 2 {
++++ t.Fatalf("Invalid certificate pool")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++/*
++++Package libtrust provides an interface for managing authentication and
++++authorization using public key cryptography. Authentication is handled
++++using the identity attached to the public key and verified through TLS
++++x509 certificates, a key challenge, or signature. Authorization and
++++access control is managed through a trust graph distributed between
++++both remote trust servers and locally cached and managed data.
++++*/
++++package libtrust
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "crypto"
++++ "crypto/ecdsa"
++++ "crypto/elliptic"
++++ "crypto/rand"
++++ "crypto/x509"
++++ "encoding/json"
++++ "encoding/pem"
++++ "errors"
++++ "fmt"
++++ "io"
++++ "math/big"
++++)
++++
++++/*
++++ * EC DSA PUBLIC KEY
++++ */
++++
// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
// signature algorithms.
type ecPublicKey struct {
	*ecdsa.PublicKey
	curveName          string              // JWK curve identifier: "P-256", "P-384" or "P-521"
	signatureAlgorithm *signatureAlgorithm // the single signature algorithm valid for this curve
	extended           map[string]interface{} // extra JWK fields carried through serialization
}
++++
++++func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
++++ curve := cryptoPublicKey.Curve
++++
++++ switch {
++++ case curve == elliptic.P256():
++++ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
++++ case curve == elliptic.P384():
++++ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
++++ case curve == elliptic.P521():
++++ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
++++ default:
++++ return nil, errors.New("unsupported elliptic curve")
++++ }
++++}
++++
// KeyType returns the key type for elliptic curve keys, i.e., "EC".
// This is the value used for the JWK "kty" member.
func (k *ecPublicKey) KeyType() string {
	return "EC"
}
++++
// CurveName returns the elliptic curve identifier.
// Possible values are "P-256", "P-384", and "P-521".
// This is the value used for the JWK "crv" member.
func (k *ecPublicKey) CurveName() string {
	return k.curveName
}
++++
++++// KeyID returns a distinct identifier which is unique to this Public Key.
++++func (k *ecPublicKey) KeyID() string {
++++ // Generate and return a libtrust fingerprint of the EC public key.
++++ // For an EC key this should be:
++++ // SHA256("EC"+curveName+bytes(X)+bytes(Y))
++++ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
++++ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
++++ hasher := crypto.SHA256.New()
++++ hasher.Write([]byte(k.KeyType() + k.CurveName()))
++++ hasher.Write(k.X.Bytes())
++++ hasher.Write(k.Y.Bytes())
++++ return keyIDEncode(hasher.Sum(nil)[:30])
++++}
++++
// String returns a short human-readable description of this public key,
// including its key ID fingerprint.
func (k *ecPublicKey) String() string {
	return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
}
++++
++++// Verify verifyies the signature of the data in the io.Reader using this
++++// PublicKey. The alg parameter should identify the digital signature
++++// algorithm which was used to produce the signature and should be supported
++++// by this public key. Returns a nil error if the signature is valid.
++++func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
++++ // For EC keys there is only one supported signature algorithm depending
++++ // on the curve parameters.
++++ if k.signatureAlgorithm.HeaderParam() != alg {
++++ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
++++ }
++++
++++ // signature is the concatenation of (r, s), base64Url encoded.
++++ sigLength := len(signature)
++++ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
++++ if sigLength != expectedOctetLength {
++++ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
++++ }
++++
++++ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
++++ r := new(big.Int).SetBytes(rBytes)
++++ s := new(big.Int).SetBytes(sBytes)
++++
++++ hasher := k.signatureAlgorithm.HashID().New()
++++ _, err := io.Copy(hasher, data)
++++ if err != nil {
++++ return fmt.Errorf("error reading data to sign: %s", err)
++++ }
++++ hash := hasher.Sum(nil)
++++
++++ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
++++ return errors.New("invalid signature")
++++ }
++++
++++ return nil
++++}
++++
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
// (for this implementation it is always the embedded *ecdsa.PublicKey).
func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
	return k.PublicKey
}
++++
++++func (k *ecPublicKey) toMap() map[string]interface{} {
++++ jwk := make(map[string]interface{})
++++ for k, v := range k.extended {
++++ jwk[k] = v
++++ }
++++ jwk["kty"] = k.KeyType()
++++ jwk["kid"] = k.KeyID()
++++ jwk["crv"] = k.CurveName()
++++
++++ xBytes := k.X.Bytes()
++++ yBytes := k.Y.Bytes()
++++ octetLength := (k.Params().BitSize + 7) >> 3
++++ // MUST include leading zeros in the output so that x, y are each
++++ // *octetLength* bytes long.
++++ xBuf := make([]byte, octetLength-len(xBytes), octetLength)
++++ yBuf := make([]byte, octetLength-len(yBytes), octetLength)
++++ xBuf = append(xBuf, xBytes...)
++++ yBuf = append(yBuf, yBytes...)
++++
++++ jwk["x"] = joseBase64UrlEncode(xBuf)
++++ jwk["y"] = joseBase64UrlEncode(yBuf)
++++
++++ return jwk
++++}
++++
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// elliptic curve keys.
// The output includes any extended fields previously added to the key.
func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}
++++
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
// NOTE: as a side effect this records the key ID in k.extended so the
// fingerprint appears in the PEM block headers.
func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
	}
	k.extended["keyID"] = k.KeyID() // For display purposes.
	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
++++
// AddExtendedField stores an arbitrary field alongside the standard JWK
// members; extended fields are carried through the JSON and PEM
// serializations of this key.
func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
	k.extended[field] = value
}
++++
++++func (k *ecPublicKey) GetExtendedField(field string) interface{} {
++++ v, ok := k.extended[field]
++++ if !ok {
++++ return nil
++++ }
++++ return v
++++}
++++
++++func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
++++ // JWK key type (kty) has already been determined to be "EC".
++++ // Need to extract 'crv', 'x', 'y', and 'kid' and check for
++++ // consistency.
++++
++++ // Get the curve identifier value.
++++ crv, err := stringFromMap(jwk, "crv")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
++++ }
++++
++++ var (
++++ curve elliptic.Curve
++++ sigAlg *signatureAlgorithm
++++ )
++++
++++ switch {
++++ case crv == "P-256":
++++ curve = elliptic.P256()
++++ sigAlg = es256
++++ case crv == "P-384":
++++ curve = elliptic.P384()
++++ sigAlg = es384
++++ case crv == "P-521":
++++ curve = elliptic.P521()
++++ sigAlg = es512
++++ default:
++++ return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
++++ }
++++
++++ // Get the X and Y coordinates for the public key point.
++++ xB64Url, err := stringFromMap(jwk, "x")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
++++ }
++++ x, err := parseECCoordinate(xB64Url, curve)
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
++++ }
++++
++++ yB64Url, err := stringFromMap(jwk, "y")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
++++ }
++++ y, err := parseECCoordinate(yB64Url, curve)
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
++++ }
++++
++++ key := &ecPublicKey{
++++ PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
++++ curveName: crv, signatureAlgorithm: sigAlg,
++++ }
++++
++++ // Key ID is optional too, but if it exists, it should match the key.
++++ _, ok := jwk["kid"]
++++ if ok {
++++ kid, err := stringFromMap(jwk, "kid")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
++++ }
++++ if kid != key.KeyID() {
++++ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
++++ }
++++ }
++++
++++ key.extended = jwk
++++
++++ return key, nil
++++}
++++
++++/*
++++ * EC DSA PRIVATE KEY
++++ */
++++
// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
// algorithms. It embeds the corresponding public-key representation plus the
// stdlib ECDSA private key that performs the actual signing.
type ecPrivateKey struct {
	ecPublicKey
	*ecdsa.PrivateKey
}
++++
++++func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
++++ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
++++}
++++
// PublicKey returns the Public Key data associated with this Private Key.
func (k *ecPrivateKey) PublicKey() PublicKey {
	return &k.ecPublicKey
}

// String returns a human-readable description of the key, including its key ID.
func (k *ecPrivateKey) String() string {
	return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
}
++++
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the elliptic curve private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the signature
// otherwise the default hashing algorithm for this key is used. Returns
// the signature and the name of the JWK signature algorithm used, e.g.,
// "ES256", "ES384", "ES512".
func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
	// Generate a signature of the data using the internal alg.
	// The given hashID is only a suggestion, and since EC keys only support
	// one signature/hash algorithm given the curve name, we disregard it for
	// the elliptic curve JWK signature implementation.
	hasher := k.signatureAlgorithm.HashID().New()
	_, err = io.Copy(hasher, data)
	if err != nil {
		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
	}
	hash := hasher.Sum(nil)

	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
	if err != nil {
		return nil, "", fmt.Errorf("error producing signature: %s", err)
	}
	rBytes, sBytes := r.Bytes(), s.Bytes()
	// R and S are each left-padded with zeros to the full octet length of
	// the curve before concatenation (JWS ECDSA signature format).
	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
	// MUST include leading zeros in the output
	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
	sBuf := make([]byte, octetLength-len(sBytes), octetLength)

	rBuf = append(rBuf, rBytes...)
	sBuf = append(sBuf, sBytes...)

	// The JWS signature is the raw concatenation R || S.
	signature = append(rBuf, sBuf...)
	alg = k.signatureAlgorithm.HeaderParam()

	return
}
++++
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. For
// this key the concrete type is *ecdsa.PrivateKey. (The previous comment
// incorrectly described it as a crypto.PublicKey.)
func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
	return k.PrivateKey
}
++++
// toMap serializes this private key to a generic JWK map by extending the
// public-key map with the zero-padded private parameter 'd'.
func (k *ecPrivateKey) toMap() map[string]interface{} {
	jwk := k.ecPublicKey.toMap()

	dBytes := k.D.Bytes()
	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
	// octets (where n is the order of the curve). This is because the private
	// key d must be in the interval [1, n-1] so the bitlength of d should be
	// no larger than the bitlength of n-1. The easiest way to find the octet
	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
	// bit sequence right by 3, which is essentially dividing by 8 and adding
	// 1 if there is any remainder. Thus, the private key value d should be
	// output to (bitlength(n-1)+7)>>3 octets.
	n := k.ecPublicKey.Params().N
	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
	// Create a buffer with the necessary zero-padding.
	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
	dBuf = append(dBuf, dBytes...)

	jwk["d"] = joseBase64UrlEncode(dBuf)

	return jwk
}
++++
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// elliptic curve keys (delegates to toMap for the actual field layout).
func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}
++++
// PEMBlock serializes this Private Key to an "EC PRIVATE KEY" PEM block.
// NOTE(review): x509.MarshalECPrivateKey produces SEC 1 ASN.1 DER, not PKIX
// as the original comment (and the error string) claimed.
func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
	if err != nil {
		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
	}
	k.extended["keyID"] = k.KeyID() // For display purposes.
	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
}
++++
// ecPrivateKeyFromMap builds an *ecPrivateKey from a generic JWK map whose
// key type (kty) has already been determined to be "EC".
func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
	// Extract 'd' before parsing the public portion: ecPublicKeyFromMap
	// captures the remaining map as extended fields.
	// NOTE(review): presumably stringFromMap consumes the entry — verify.
	dB64Url, err := stringFromMap(jwk, "d")
	if err != nil {
		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
	}

	// JWK key type (kty) has already been determined to be "EC".
	// Need to extract the public key information, then extract the private
	// key value 'd'.
	publicKey, err := ecPublicKeyFromMap(jwk)
	if err != nil {
		return nil, err
	}

	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
	if err != nil {
		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
	}

	key := &ecPrivateKey{
		ecPublicKey: *publicKey,
		PrivateKey: &ecdsa.PrivateKey{
			PublicKey: *publicKey.PublicKey,
			D: d,
		},
	}

	return key, nil
}
++++
++++/*
++++ * Key Generation Functions.
++++ */
++++
++++func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
++++ k = new(ecPrivateKey)
++++ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
++++ k.extended = make(map[string]interface{})
++++
++++ return
++++}
++++
++++// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
++++func GenerateECP256PrivateKey() (PrivateKey, error) {
++++ k, err := generateECPrivateKey(elliptic.P256())
++++ if err != nil {
++++ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
++++ }
++++
++++ k.curveName = "P-256"
++++ k.signatureAlgorithm = es256
++++
++++ return k, nil
++++}
++++
// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
// The returned key signs with ES384 (see the es384 algorithm descriptor).
func GenerateECP384PrivateKey() (PrivateKey, error) {
	k, err := generateECPrivateKey(elliptic.P384())
	if err != nil {
		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
	}

	k.curveName = "P-384"
	k.signatureAlgorithm = es384

	return k, nil
}
++++
// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
// The returned key signs with ES512 (see the es512 algorithm descriptor).
func GenerateECP521PrivateKey() (PrivateKey, error) {
	k, err := generateECPrivateKey(elliptic.P521())
	if err != nil {
		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
	}

	k.curveName = "P-521"
	k.signatureAlgorithm = es512

	return k, nil
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "bytes"
++++ "encoding/json"
++++ "testing"
++++)
++++
++++func generateECTestKeys(t *testing.T) []PrivateKey {
++++ p256Key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ p384Key, err := GenerateECP384PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ p521Key, err := GenerateECP521PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ return []PrivateKey{p256Key, p384Key, p521Key}
++++}
++++
// TestECKeys checks that every generated elliptic curve key reports the JWK
// key type "EC".
func TestECKeys(t *testing.T) {
	ecKeys := generateECTestKeys(t)

	for _, ecKey := range ecKeys {
		if ecKey.KeyType() != "EC" {
			t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType())
		}
	}
}
++++
// TestECSignVerify signs a message with each curve's key and verifies the
// resulting signature with the same key.
func TestECSignVerify(t *testing.T) {
	ecKeys := generateECTestKeys(t)

	message := "Hello, World!"
	data := bytes.NewReader([]byte(message))

	// sigAlgs[i] matches the curve of ecKeys[i] (P-256/ES256, etc.).
	sigAlgs := []*signatureAlgorithm{es256, es384, es512}

	for i, ecKey := range ecKeys {
		sigAlg := sigAlgs[i]

		t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID())

		data.Seek(0, 0) // Reset the byte reader

		// Sign
		sig, alg, err := ecKey.Sign(data, sigAlg.HashID())
		if err != nil {
			t.Fatal(err)
		}

		data.Seek(0, 0) // Reset the byte reader

		// Verify
		err = ecKey.Verify(data, alg, sig)
		if err != nil {
			t.Fatal(err)
		}
	}
}
++++
// TestMarshalUnmarshalECKeys round-trips each key pair through JWK JSON
// serialization and checks the unmarshalled keys can still sign and verify.
func TestMarshalUnmarshalECKeys(t *testing.T) {
	ecKeys := generateECTestKeys(t)
	data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test."))
	sigAlgs := []*signatureAlgorithm{es256, es384, es512}

	for i, ecKey := range ecKeys {
		sigAlg := sigAlgs[i]
		privateJWKJSON, err := json.MarshalIndent(ecKey, "", "    ")
		if err != nil {
			t.Fatal(err)
		}

		publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", "    ")
		if err != nil {
			t.Fatal(err)
		}

		t.Logf("JWK Private Key: %s", string(privateJWKJSON))
		t.Logf("JWK Public Key: %s", string(publicJWKJSON))

		privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON)
		if err != nil {
			t.Fatal(err)
		}

		pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON)
		if err != nil {
			t.Fatal(err)
		}

		// Ensure we can sign/verify a message with the unmarshalled keys.
		data.Seek(0, 0) // Reset the byte reader
		signature, alg, err := privKey2.Sign(data, sigAlg.HashID())
		if err != nil {
			t.Fatal(err)
		}

		data.Seek(0, 0) // Reset the byte reader
		err = pubKey2.Verify(data, alg, signature)
		if err != nil {
			t.Fatal(err)
		}
	}
}
++++
++++func TestFromCryptoECKeys(t *testing.T) {
++++ ecKeys := generateECTestKeys(t)
++++
++++ for _, ecKey := range ecKeys {
++++ cryptoPrivateKey := ecKey.CryptoPrivateKey()
++++ cryptoPublicKey := ecKey.CryptoPublicKey()
++++
++++ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if pubKey.KeyID() != ecKey.KeyID() {
++++ t.Fatal("public key key ID mismatch")
++++ }
++++
++++ privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if privKey.KeyID() != ecKey.KeyID() {
++++ t.Fatal("public key key ID mismatch")
++++ }
++++ }
++++}
++++
++++func TestExtendedFields(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ key.AddExtendedField("test", "foobar")
++++ val := key.GetExtendedField("test")
++++
++++ gotVal, ok := val.(string)
++++ if !ok {
++++ t.Fatalf("value is not a string")
++++ } else if gotVal != val {
++++ t.Fatalf("value %q is not equal to %q", gotVal, val)
++++ }
++++
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "path/filepath"
++++)
++++
++++// FilterByHosts filters the list of PublicKeys to only those which contain a
++++// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
++++// then keys which do not specify any hosts are also returned.
++++func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
++++ filtered := make([]PublicKey, 0, len(keys))
++++
++++ for _, pubKey := range keys {
++++ hosts, ok := pubKey.GetExtendedField("hosts").([]interface{})
++++
++++ if !ok || (ok && len(hosts) == 0) {
++++ if includeEmpty {
++++ filtered = append(filtered, pubKey)
++++ }
++++ continue
++++ }
++++
++++ // Check if any hosts match pattern
++++ for _, hostVal := range hosts {
++++ hostPattern, ok := hostVal.(string)
++++ if !ok {
++++ continue
++++ }
++++
++++ match, err := filepath.Match(hostPattern, host)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ if match {
++++ filtered = append(filtered, pubKey)
++++ continue
++++ }
++++ }
++++
++++ }
++++
++++ return filtered, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "testing"
++++)
++++
++++func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) {
++++ if len(sliceA) != len(sliceB) {
++++ t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB))
++++ }
++++
++++ for i, itemA := range sliceA {
++++ itemB := sliceB[i]
++++ if itemA != itemB {
++++ t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB)
++++ }
++++ }
++++}
++++
// TestFilter exercises FilterByHosts with a mix of keys: one with no hosts,
// some matching only even-numbered subdomains, some matching *.example.com,
// and one wildcard key matching every host.
func TestFilter(t *testing.T) {
	keys := make([]PublicKey, 0, 8)

	// Create 8 keys and add host entries.
	for i := 0; i < cap(keys); i++ {
		key, err := GenerateECP256PrivateKey()
		if err != nil {
			t.Fatal(err)
		}

		// Note: switch cases are evaluated in order, so i == 0 wins over
		// i%2 == 0 and i == 7 is only reached for odd i.
		switch {
		case i == 0:
			// Don't add entries for this key, key 0.
			break
		case i%2 == 0:
			// Should catch keys 2, 4, and 6.
			key.AddExtendedField("hosts", []interface{}{"*.even.example.com"})
		case i == 7:
			// Should catch only the last key, and make it match any hostname.
			key.AddExtendedField("hosts", []interface{}{"*"})
		default:
			// should catch keys 1, 3, 5.
			key.AddExtendedField("hosts", []interface{}{"*.example.com"})
		}

		keys = append(keys, key)
	}

	// Should match 2 keys, the empty one, and the one that matches all hosts.
	matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true)
	if err != nil {
		t.Fatal(err)
	}
	expectedMatch := []PublicKey{keys[0], keys[7]}
	compareKeySlices(t, expectedMatch, matchedKeys)

	// Should match 1 key, the one that matches any host.
	matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false)
	if err != nil {
		t.Fatal(err)
	}
	expectedMatch = []PublicKey{keys[7]}
	compareKeySlices(t, expectedMatch, matchedKeys)

	// Should match keys that end in "example.com", and the key that matches anything.
	matchedKeys, err = FilterByHosts(keys, "foo.example.com", false)
	if err != nil {
		t.Fatal(err)
	}
	expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]}
	compareKeySlices(t, expectedMatch, matchedKeys)

	// Should match all of the keys except the empty key.
	matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false)
	if err != nil {
		t.Fatal(err)
	}
	expectedMatch = keys[1:]
	compareKeySlices(t, expectedMatch, matchedKeys)
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "crypto"
	_ "crypto/sha256" // Register SHA224 and SHA256
	_ "crypto/sha512" // Register SHA384 and SHA512
++++ "fmt"
++++)
++++
// signatureAlgorithm pairs a JWS "alg" header parameter value with the
// crypto.Hash used to compute digests for that algorithm.
type signatureAlgorithm struct {
	algHeaderParam string
	hashID crypto.Hash
}

// HeaderParam returns the JWS "alg" header parameter value, e.g. "RS256".
func (h *signatureAlgorithm) HeaderParam() string {
	return h.algHeaderParam
}

// HashID returns the crypto.Hash identifier for this algorithm's digest.
func (h *signatureAlgorithm) HashID() crypto.Hash {
	return h.hashID
}
++++
// Signature algorithm descriptors for the RSASSA-PKCS1-v1_5 (RSxxx) and
// ECDSA (ESxxx) families, each paired with its SHA-2 digest.
var (
	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
)
++++
++++func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
++++ switch {
++++ case alg == "RS256":
++++ return rs256, nil
++++ case alg == "RS384":
++++ return rs384, nil
++++ case alg == "RS512":
++++ return rs512, nil
++++ default:
++++ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
++++ }
++++}
++++
++++func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
++++ switch {
++++ case hashID == crypto.SHA512:
++++ return rs512
++++ case hashID == crypto.SHA384:
++++ return rs384
++++ case hashID == crypto.SHA256:
++++ fallthrough
++++ default:
++++ return rs256
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
import (
	"bytes"
	"crypto"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"time"
	"unicode"
)
++++
// Sentinel errors returned by the JSON signature functions below.
var (
	// ErrInvalidSignContent is used when the content to be signed is invalid.
	ErrInvalidSignContent = errors.New("invalid sign content")

	// ErrInvalidJSONContent is used when invalid json is encountered.
	ErrInvalidJSONContent = errors.New("invalid json content")

	// ErrMissingSignatureKey is used when the specified signature key
	// does not exist in the JSON content.
	ErrMissingSignatureKey = errors.New("missing signature key")
)
++++
// jsHeader is the JOSE header of one signature: the signing key embedded as
// a JWK, the "alg" name, and an optional x5c certificate chain.
type jsHeader struct {
	JWK PublicKey `json:"jwk,omitempty"`
	Algorithm string `json:"alg"`
	Chain []string `json:"x5c,omitempty"`
}

// jsSignature is one entry of the JWS "signatures" array.
type jsSignature struct {
	Header *jsHeader `json:"header"`
	Signature string `json:"signature"`
	Protected string `json:"protected,omitempty"`
}

// signKey bundles a private key with the x509 chain used when signing.
type signKey struct {
	PrivateKey
	Chain []*x509.Certificate
}

// JSONSignature represents a signature of a json object.
type JSONSignature struct {
	payload string // base64url-encoded JSON payload
	signatures []*jsSignature // one entry per added signature
	indent string // detected indent of the original JSON; "" if compact
	formatLength int // byte length of the payload up to the trailing '}' section
	formatTail []byte // trailing whitespace + '}' removed for pretty signatures
}
++++
// newJSONSignature returns an empty JSONSignature with room for one signature.
func newJSONSignature() *JSONSignature {
	return &JSONSignature{
		signatures: make([]*jsSignature, 0, 1),
	}
}

// Payload returns the decoded JSON payload of the signature. This
// payload should not be signed directly (use Sign, which includes the
// protected header in the signing input).
func (js *JSONSignature) Payload() ([]byte, error) {
	return joseBase64UrlDecode(js.payload)
}
++++
// protectedHeader builds the base64url-encoded JWS protected header carrying
// the pretty-format metadata (formatLength, formatTail) and a signing time.
func (js *JSONSignature) protectedHeader() (string, error) {
	protected := map[string]interface{}{
		"formatLength": js.formatLength,
		"formatTail": joseBase64UrlEncode(js.formatTail),
		"time": time.Now().UTC().Format(time.RFC3339),
	}
	protectedBytes, err := json.Marshal(protected)
	if err != nil {
		return "", err
	}

	return joseBase64UrlEncode(protectedBytes), nil
}

// signBytes assembles the JWS signing input: protectedHeader + "." + payload.
// The error result is currently always nil; it is kept for call-site symmetry.
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
	buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
	copy(buf, protectedHeader)
	buf[len(protectedHeader)] = '.'
	copy(buf[len(protectedHeader)+1:], js.payload)
	return buf, nil
}
++++
// Sign adds a signature using the given private key. The signing input is
// the protected header joined with the payload by '.', and the resulting
// signature entry embeds the key's public JWK in its header.
func (js *JSONSignature) Sign(key PrivateKey) error {
	protected, err := js.protectedHeader()
	if err != nil {
		return err
	}
	signBytes, err := js.signBytes(protected)
	if err != nil {
		return err
	}
	// SHA256 is a suggestion; the key may substitute its own hash algorithm
	// and reports the actual JWS algorithm name used.
	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
	if err != nil {
		return err
	}

	header := &jsHeader{
		JWK: key.PublicKey(),
		Algorithm: algorithm,
	}
	sig := &jsSignature{
		Header: header,
		Signature: joseBase64UrlEncode(sigBytes),
		Protected: protected,
	}

	js.signatures = append(js.signatures, sig)

	return nil
}
++++
// SignWithChain adds a signature using the given private key
// and setting the x509 chain. The public key of the first element
// in the chain must be the public key corresponding with the sign key.
// NOTE(review): the chain/key correspondence described above is NOT actually
// verified here — see the commented-out sketch below; confirm callers ensure it.
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
	// Ensure key.Chain[0] is public key for key
	//key.Chain.PublicKey
	//key.PublicKey().CryptoPublicKey()

	// Verify chain
	protected, err := js.protectedHeader()
	if err != nil {
		return err
	}
	signBytes, err := js.signBytes(protected)
	if err != nil {
		return err
	}
	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
	if err != nil {
		return err
	}

	// Unlike Sign, the header carries the x5c chain instead of a JWK.
	header := &jsHeader{
		Chain: make([]string, len(chain)),
		Algorithm: algorithm,
	}

	// x5c entries are standard (not URL-safe) base64 DER per the JWS spec.
	for i, cert := range chain {
		header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
	}

	sig := &jsSignature{
		Header: header,
		Signature: joseBase64UrlEncode(sigBytes),
		Protected: protected,
	}

	js.signatures = append(js.signatures, sig)

	return nil
}
++++
// Verify verifies all the signatures and returns the list of
// public keys used to sign. Any x509 chains are not checked.
func (js *JSONSignature) Verify() ([]PublicKey, error) {
	keys := make([]PublicKey, len(js.signatures))
	for i, signature := range js.signatures {
		// Rebuild the exact signing input: protected header + "." + payload.
		signBytes, err := js.signBytes(signature.Protected)
		if err != nil {
			return nil, err
		}
		var publicKey PublicKey
		if len(signature.Header.Chain) > 0 {
			// When a chain is present, the leaf certificate's key is the
			// signature's public key (the chain itself is not validated here).
			certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
			if err != nil {
				return nil, err
			}
			cert, err := x509.ParseCertificate(certBytes)
			if err != nil {
				return nil, err
			}
			publicKey, err = FromCryptoPublicKey(cert.PublicKey)
			if err != nil {
				return nil, err
			}
		} else if signature.Header.JWK != nil {
			publicKey = signature.Header.JWK
		} else {
			return nil, errors.New("missing public key")
		}

		sigBytes, err := joseBase64UrlDecode(signature.Signature)
		if err != nil {
			return nil, err
		}

		err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
		if err != nil {
			return nil, err
		}

		keys[i] = publicKey
	}
	return keys, nil
}
++++
// VerifyChains verifies all the signatures and the chains associated
// with each signature and returns the list of verified chains.
// Signatures without an x509 chain are not checked.
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
	chains := make([][]*x509.Certificate, 0, len(js.signatures))
	for _, signature := range js.signatures {
		// Rebuild the exact signing input: protected header + "." + payload.
		signBytes, err := js.signBytes(signature.Protected)
		if err != nil {
			return nil, err
		}
		var publicKey PublicKey
		// Only signatures carrying an x5c chain are verified; all others
		// are silently skipped (see the doc comment above).
		if len(signature.Header.Chain) > 0 {
			certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
			if err != nil {
				return nil, err
			}
			cert, err := x509.ParseCertificate(certBytes)
			if err != nil {
				return nil, err
			}
			// The leaf certificate's key verifies the signature.
			publicKey, err = FromCryptoPublicKey(cert.PublicKey)
			if err != nil {
				return nil, err
			}
			// Remaining chain entries act as intermediates for path
			// building from the leaf up to the provided CA roots.
			intermediates := x509.NewCertPool()
			if len(signature.Header.Chain) > 1 {
				intermediateChain := signature.Header.Chain[1:]
				for i := range intermediateChain {
					certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
					if err != nil {
						return nil, err
					}
					intermediate, err := x509.ParseCertificate(certBytes)
					if err != nil {
						return nil, err
					}
					intermediates.AddCert(intermediate)
				}
			}

			verifyOptions := x509.VerifyOptions{
				Intermediates: intermediates,
				Roots: ca,
			}

			verifiedChains, err := cert.Verify(verifyOptions)
			if err != nil {
				return nil, err
			}
			chains = append(chains, verifiedChains...)

			sigBytes, err := joseBase64UrlDecode(signature.Signature)
			if err != nil {
				return nil, err
			}

			// Finally check the cryptographic signature itself.
			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
			if err != nil {
				return nil, err
			}
		}

	}
	return chains, nil
}
++++
// JWS returns JSON serialized JWS according to
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
// (the "general" JWS JSON serialization: a payload plus a signatures array).
func (js *JSONSignature) JWS() ([]byte, error) {
	if len(js.signatures) == 0 {
		return nil, errors.New("missing signature")
	}
	jsonMap := map[string]interface{}{
		"payload": js.payload,
		"signatures": js.signatures,
	}

	return json.MarshalIndent(jsonMap, "", "   ")
}
++++
// notSpace reports whether r is NOT a Unicode whitespace rune; used with
// bytes.LastIndexFunc to find the last significant character.
func notSpace(r rune) bool {
	return !unicode.IsSpace(r)
}
++++
// detectJSONIndent returns the indent string used by a pretty-printed JSON
// object (the whitespace between "{\n" and the first '"'), or "" when the
// content is compact or does not start with an opening brace and newline.
func detectJSONIndent(jsonContent []byte) string {
	if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
		if quoteIndex := bytes.IndexRune(jsonContent[1:], '"'); quoteIndex > 0 {
			return string(jsonContent[2 : quoteIndex+1])
		}
	}
	return ""
}
++++
// jsParsedHeader mirrors jsHeader but keeps the JWK as raw JSON so it can be
// unmarshalled into a concrete key type after parsing.
type jsParsedHeader struct {
	JWK json.RawMessage `json:"jwk"`
	Algorithm string `json:"alg"`
	Chain []string `json:"x5c"`
}

// jsParsedSignature is the wire form of one "signatures" array entry.
type jsParsedSignature struct {
	Header *jsParsedHeader `json:"header"`
	Signature string `json:"signature"`
	Protected string `json:"protected"`
}
++++
// ParseJWS parses a JWS serialized JSON object into a Json Signature.
// It decodes the payload, rebuilds the format metadata via NewJSONSignature,
// and converts each parsed signature entry back into the internal form.
func ParseJWS(content []byte) (*JSONSignature, error) {
	type jsParsed struct {
		Payload string `json:"payload"`
		Signatures []*jsParsedSignature `json:"signatures"`
	}
	parsed := &jsParsed{}
	err := json.Unmarshal(content, parsed)
	if err != nil {
		return nil, err
	}
	if len(parsed.Signatures) == 0 {
		return nil, errors.New("missing signatures")
	}
	payload, err := joseBase64UrlDecode(parsed.Payload)
	if err != nil {
		return nil, err
	}

	// NewJSONSignature re-derives indent/formatLength/formatTail from the
	// decoded payload.
	js, err := NewJSONSignature(payload)
	if err != nil {
		return nil, err
	}
	js.signatures = make([]*jsSignature, len(parsed.Signatures))
	for i, signature := range parsed.Signatures {
		header := &jsHeader{
			Algorithm: signature.Header.Algorithm,
		}
		if signature.Header.Chain != nil {
			header.Chain = signature.Header.Chain
		}
		if signature.Header.JWK != nil {
			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
			if err != nil {
				return nil, err
			}
			header.JWK = publicKey
		}
		js.signatures[i] = &jsSignature{
			Header: header,
			Signature: signature.Signature,
			Protected: signature.Protected,
		}
	}

	return js, nil
}
++++
// NewJSONSignature returns a new unsigned JWS from a json byte array.
// JSONSignature will need to be signed before serializing or storing.
func NewJSONSignature(content []byte) (*JSONSignature, error) {
	var dataMap map[string]interface{}
	// Parse only to validate that content is a JSON object; the map itself
	// is discarded.
	err := json.Unmarshal(content, &dataMap)
	if err != nil {
		return nil, err
	}

	js := newJSONSignature()
	js.indent = detectJSONIndent(content)

	js.payload = joseBase64UrlEncode(content)

	// Find trailing } and whitespace, put in protected header
	closeIndex := bytes.LastIndexFunc(content, notSpace)
	if content[closeIndex] != '}' {
		return nil, ErrInvalidJSONContent
	}
	lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
	if content[lastRuneIndex] == ',' {
		// A trailing comma before '}' would make the re-assembled pretty
		// signature invalid JSON.
		return nil, ErrInvalidJSONContent
	}
	// formatTail is everything from the last significant rune to the end;
	// PrettySignature re-attaches it after inserting the signatures block.
	js.formatLength = lastRuneIndex + 1
	js.formatTail = content[js.formatLength:]

	return js, nil
}
++++
++++// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
++++// struct. JWS will need to be signed before serializing or storing.
++++func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
++++ switch content.(type) {
++++ case map[string]interface{}:
++++ case struct{}:
++++ default:
++++ return nil, errors.New("invalid data type")
++++ }
++++
++++ js := newJSONSignature()
++++ js.indent = " "
++++
++++ payload, err := json.MarshalIndent(content, "", js.indent)
++++ if err != nil {
++++ return nil, err
++++ }
++++ js.payload = joseBase64UrlEncode(payload)
++++
++++ // Remove '\n}' from formatted section, put in protected header
++++ js.formatLength = len(payload) - 2
++++ js.formatTail = payload[js.formatLength:]
++++
++++ return js, nil
++++}
++++
// readIntFromMap reads the value stored under key in m as an int. Because
// JSON numbers decode as float64, both int and float64 values are accepted
// (float64 is truncated); any other type or a missing key yields (0, false).
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
	value, ok := m[key]
	if !ok {
		return 0, false
	}
	if i, isInt := value.(int); isInt {
		return i, true
	}
	if f, isFloat := value.(float64); isFloat {
		return int(f), true
	}
	return 0, false
}
++++
// readStringFromMap reads the value stored under key in m as a string,
// returning ok=false when the key is absent or the value is not a string.
func readStringFromMap(key string, m map[string]interface{}) (string, bool) {
	value, present := m[key]
	if !present {
		return "", false
	}
	s, isString := value.(string)
	return s, isString
}
++++
++++// ParsePrettySignature parses a formatted signature into a
++++// JSON signature. If the signatures are missing the format information
++++// an error is thrown. The formatted signature must be created by
++++// the same method as format signature.
++++func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
++++ var contentMap map[string]json.RawMessage
++++ err := json.Unmarshal(content, &contentMap)
++++ if err != nil {
++++ return nil, fmt.Errorf("error unmarshalling content: %s", err)
++++ }
++++ sigMessage, ok := contentMap[signatureKey]
++++ if !ok {
++++ return nil, ErrMissingSignatureKey
++++ }
++++
++++ var signatureBlocks []jsParsedSignature
++++ err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
++++ if err != nil {
++++ return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
++++ }
++++
++++ js := newJSONSignature()
++++ js.signatures = make([]*jsSignature, len(signatureBlocks))
++++
++++ for i, signatureBlock := range signatureBlocks {
++++ protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
++++ if err != nil {
++++ return nil, fmt.Errorf("base64 decode error: %s", err)
++++ }
++++ var protectedHeader map[string]interface{}
++++ err = json.Unmarshal(protectedBytes, &protectedHeader)
++++ if err != nil {
++++ return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
++++ }
++++
++++ formatLength, ok := readIntFromMap("formatLength", protectedHeader)
++++ if !ok {
++++ return nil, errors.New("missing formatted length")
++++ }
++++ encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
++++ if !ok {
++++ return nil, errors.New("missing formatted tail")
++++ }
++++ formatTail, err := joseBase64UrlDecode(encodedTail)
++++ if err != nil {
++++ return nil, fmt.Errorf("base64 decode error on tail: %s", err)
++++ }
++++ if js.formatLength == 0 {
++++ js.formatLength = formatLength
++++ } else if js.formatLength != formatLength {
++++ return nil, errors.New("conflicting format length")
++++ }
++++ if len(js.formatTail) == 0 {
++++ js.formatTail = formatTail
++++ } else if bytes.Compare(js.formatTail, formatTail) != 0 {
++++ return nil, errors.New("conflicting format tail")
++++ }
++++
++++ header := &jsHeader{
++++ Algorithm: signatureBlock.Header.Algorithm,
++++ Chain: signatureBlock.Header.Chain,
++++ }
++++ if signatureBlock.Header.JWK != nil {
++++ publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
++++ if err != nil {
++++ return nil, fmt.Errorf("error unmarshalling public key: %s", err)
++++ }
++++ header.JWK = publicKey
++++ }
++++ js.signatures[i] = &jsSignature{
++++ Header: header,
++++ Signature: signatureBlock.Signature,
++++ Protected: signatureBlock.Protected,
++++ }
++++ }
++++ if js.formatLength > len(content) {
++++ return nil, errors.New("invalid format length")
++++ }
++++ formatted := make([]byte, js.formatLength+len(js.formatTail))
++++ copy(formatted, content[:js.formatLength])
++++ copy(formatted[js.formatLength:], js.formatTail)
++++ js.indent = detectJSONIndent(formatted)
++++ js.payload = joseBase64UrlEncode(formatted)
++++
++++ return js, nil
++++}
++++
++++// PrettySignature formats a json signature into an easy to read
++++// single json serialized object.
++++func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
++++ if len(js.signatures) == 0 {
++++ return nil, errors.New("no signatures")
++++ }
++++ payload, err := joseBase64UrlDecode(js.payload)
++++ if err != nil {
++++ return nil, err
++++ }
++++ payload = payload[:js.formatLength]
++++
++++ var marshalled []byte
++++ var marshallErr error
++++ if js.indent != "" {
++++ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
++++ } else {
++++ marshalled, marshallErr = json.Marshal(js.signatures)
++++ }
++++ if marshallErr != nil {
++++ return nil, marshallErr
++++ }
++++
++++ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
++++ buf.Write(payload)
++++ buf.WriteByte(',')
++++ if js.indent != "" {
++++ buf.WriteByte('\n')
++++ buf.WriteString(js.indent)
++++ buf.WriteByte('"')
++++ buf.WriteString(signatureKey)
++++ buf.WriteString("\": ")
++++ buf.Write(marshalled)
++++ buf.WriteByte('\n')
++++ } else {
++++ buf.WriteByte('"')
++++ buf.WriteString(signatureKey)
++++ buf.WriteString("\":")
++++ buf.Write(marshalled)
++++ }
++++ buf.WriteByte('}')
++++
++++ return buf.Bytes(), nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "bytes"
++++ "crypto/x509"
++++ "encoding/json"
++++ "fmt"
++++ "testing"
++++
++++ "github.com/docker/libtrust/testutil"
++++)
++++
++++func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) {
++++ testMap := map[string]interface{}{
++++ "name": "dmcgowan/mycontainer",
++++ "config": map[string]interface{}{
++++ "ports": []int{9101, 9102},
++++ "run": "/bin/echo \"Hello\"",
++++ },
++++ "layers": []string{
++++ "2893c080-27f5-11e4-8c21-0800200c9a66",
++++ "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55",
++++ "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4",
++++ "0b6da891-7f7f-4abf-9c97-7887549e696c",
++++ "1d960389-ae4f-4011-85fd-18d0f96a67ad",
++++ },
++++ }
++++ formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{`
++++ formattedSection = fmt.Sprintf(formattedSection, sigKey)
++++ if indent != "" {
++++ buf := bytes.NewBuffer(nil)
++++ json.Indent(buf, []byte(formattedSection), "", indent)
++++ return testMap, buf.Bytes()
++++ }
++++ return testMap, []byte(formattedSection)
++++
++++}
++++
++++func TestSignJSON(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatalf("Error generating EC key: %s", err)
++++ }
++++
++++ testMap, _ := createTestJSON("buildSignatures", " ")
++++ indented, err := json.MarshalIndent(testMap, "", " ")
++++ if err != nil {
++++ t.Fatalf("Marshall error: %s", err)
++++ }
++++
++++ js, err := NewJSONSignature(indented)
++++ if err != nil {
++++ t.Fatalf("Error creating JSON signature: %s", err)
++++ }
++++ err = js.Sign(key)
++++ if err != nil {
++++ t.Fatalf("Error signing content: %s", err)
++++ }
++++
++++ keys, err := js.Verify()
++++ if err != nil {
++++ t.Fatalf("Error verifying signature: %s", err)
++++ }
++++ if len(keys) != 1 {
++++ t.Fatalf("Error wrong number of keys returned")
++++ }
++++ if keys[0].KeyID() != key.KeyID() {
++++ t.Fatalf("Unexpected public key returned")
++++ }
++++
++++}
++++
++++func TestSignMap(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatalf("Error generating EC key: %s", err)
++++ }
++++
++++ testMap, _ := createTestJSON("buildSignatures", " ")
++++ js, err := NewJSONSignatureFromMap(testMap)
++++ if err != nil {
++++ t.Fatalf("Error creating JSON signature: %s", err)
++++ }
++++ err = js.Sign(key)
++++ if err != nil {
++++ t.Fatalf("Error signing JSON signature: %s", err)
++++ }
++++
++++ keys, err := js.Verify()
++++ if err != nil {
++++ t.Fatalf("Error verifying signature: %s", err)
++++ }
++++ if len(keys) != 1 {
++++ t.Fatalf("Error wrong number of keys returned")
++++ }
++++ if keys[0].KeyID() != key.KeyID() {
++++ t.Fatalf("Unexpected public key returned")
++++ }
++++}
++++
// TestFormattedJson signs an indented document, renders it with
// PrettySignature, and checks that the formatted prefix is preserved
// byte-for-byte, that the pretty form re-parses and verifies, and that
// the final output is still valid JSON.
func TestFormattedJson(t *testing.T) {
	key, err := GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("Error generating EC key: %s", err)
	}

	// firstSection holds the expected leading bytes of the pretty output.
	testMap, firstSection := createTestJSON("buildSignatures", " ")
	indented, err := json.MarshalIndent(testMap, "", " ")
	if err != nil {
		t.Fatalf("Marshall error: %s", err)
	}

	js, err := NewJSONSignature(indented)
	if err != nil {
		t.Fatalf("Error creating JSON signature: %s", err)
	}
	err = js.Sign(key)
	if err != nil {
		t.Fatalf("Error signing content: %s", err)
	}

	b, err := js.PrettySignature("buildSignatures")
	if err != nil {
		t.Fatalf("Error signing map: %s", err)
	}

	// The pretty output must begin with the exact bytes of the original
	// formatted section — content is preserved verbatim before the
	// signature block is appended.
	if bytes.Compare(b[:len(firstSection)], firstSection) != 0 {
		t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
	}

	parsed, err := ParsePrettySignature(b, "buildSignatures")
	if err != nil {
		t.Fatalf("Error parsing formatted signature: %s", err)
	}

	keys, err := parsed.Verify()
	if err != nil {
		t.Fatalf("Error verifying signature: %s", err)
	}
	if len(keys) != 1 {
		t.Fatalf("Error wrong number of keys returned")
	}
	if keys[0].KeyID() != key.KeyID() {
		t.Fatalf("Unexpected public key returned")
	}

	// Round-trip sanity check: the pretty-signed bytes are valid JSON.
	var unmarshalled map[string]interface{}
	err = json.Unmarshal(b, &unmarshalled)
	if err != nil {
		t.Fatalf("Could not unmarshall after parse: %s", err)
	}

}
++++
// TestFormattedFlatJson is the unindented counterpart of
// TestFormattedJson: it signs a flat (non-indented) document and checks
// that PrettySignature preserves the flat prefix and that the result
// re-parses and verifies.
func TestFormattedFlatJson(t *testing.T) {
	key, err := GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("Error generating EC key: %s", err)
	}

	// Empty indent string selects the flat serialization path.
	testMap, firstSection := createTestJSON("buildSignatures", "")
	unindented, err := json.Marshal(testMap)
	if err != nil {
		t.Fatalf("Marshall error: %s", err)
	}

	js, err := NewJSONSignature(unindented)
	if err != nil {
		t.Fatalf("Error creating JSON signature: %s", err)
	}
	err = js.Sign(key)
	if err != nil {
		t.Fatalf("Error signing JSON signature: %s", err)
	}

	b, err := js.PrettySignature("buildSignatures")
	if err != nil {
		t.Fatalf("Error signing map: %s", err)
	}

	// Leading bytes must match the expected flat section exactly.
	if bytes.Compare(b[:len(firstSection)], firstSection) != 0 {
		t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
	}

	parsed, err := ParsePrettySignature(b, "buildSignatures")
	if err != nil {
		t.Fatalf("Error parsing formatted signature: %s", err)
	}

	keys, err := parsed.Verify()
	if err != nil {
		t.Fatalf("Error verifying signature: %s", err)
	}
	if len(keys) != 1 {
		t.Fatalf("Error wrong number of keys returned")
	}
	if keys[0].KeyID() != key.KeyID() {
		t.Fatalf("Unexpected public key returned")
	}
}
++++
++++func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) {
++++ parent := ca
++++ parentKey := key
++++ chain := make([]*x509.Certificate, 6)
++++ for i := 5; i > 0; i-- {
++++ intermediatekey, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatalf("Error generate key: %s", err)
++++ }
++++ chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
++++ if err != nil {
++++ t.Fatalf("Error generating intermdiate certificate: %s", err)
++++ }
++++ parent = chain[i]
++++ parentKey = intermediatekey
++++ }
++++ trustKey, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatalf("Error generate key: %s", err)
++++ }
++++ chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
++++ if err != nil {
++++ t.Fatalf("Error generate trust cert: %s", err)
++++ }
++++
++++ return trustKey, chain
++++}
++++
++++func TestChainVerify(t *testing.T) {
++++ caKey, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatalf("Error generating key: %s", err)
++++ }
++++ ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
++++ if err != nil {
++++ t.Fatalf("Error generating ca: %s", err)
++++ }
++++ trustKey, chain := generateTrustChain(t, caKey, ca)
++++
++++ testMap, _ := createTestJSON("verifySignatures", " ")
++++ js, err := NewJSONSignatureFromMap(testMap)
++++ if err != nil {
++++ t.Fatalf("Error creating JSONSignature from map: %s", err)
++++ }
++++
++++ err = js.SignWithChain(trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error signing with chain: %s", err)
++++ }
++++
++++ pool := x509.NewCertPool()
++++ pool.AddCert(ca)
++++ chains, err := js.VerifyChains(pool)
++++ if err != nil {
++++ t.Fatalf("Error verifying content: %s", err)
++++ }
++++ if len(chains) != 1 {
++++ t.Fatalf("Unexpected chains length: %d", len(chains))
++++ }
++++ if len(chains[0]) != 7 {
++++ t.Fatalf("Unexpected chain length: %d", len(chains[0]))
++++ }
++++}
++++
++++func TestInvalidChain(t *testing.T) {
++++ caKey, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatalf("Error generating key: %s", err)
++++ }
++++ ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
++++ if err != nil {
++++ t.Fatalf("Error generating ca: %s", err)
++++ }
++++ trustKey, chain := generateTrustChain(t, caKey, ca)
++++
++++ testMap, _ := createTestJSON("verifySignatures", " ")
++++ js, err := NewJSONSignatureFromMap(testMap)
++++ if err != nil {
++++ t.Fatalf("Error creating JSONSignature from map: %s", err)
++++ }
++++
++++ err = js.SignWithChain(trustKey, chain[:5])
++++ if err != nil {
++++ t.Fatalf("Error signing with chain: %s", err)
++++ }
++++
++++ pool := x509.NewCertPool()
++++ pool.AddCert(ca)
++++ chains, err := js.VerifyChains(pool)
++++ if err == nil {
++++ t.Fatalf("Expected error verifying with bad chain")
++++ }
++++ if len(chains) != 0 {
++++ t.Fatalf("Unexpected chains returned from invalid verify")
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "crypto"
++++ "crypto/ecdsa"
++++ "crypto/rsa"
++++ "crypto/x509"
++++ "encoding/json"
++++ "encoding/pem"
++++ "errors"
++++ "fmt"
++++ "io"
++++)
++++
// PublicKey is a generic interface for a Public Key.
type PublicKey interface {
	// KeyType returns the key type for this key. For elliptic curve keys,
	// this value should be "EC". For RSA keys, this value should be "RSA".
	KeyType() string
	// KeyID returns a distinct identifier which is unique to this Public Key.
	// The format generated by this library is a base32 encoding of a 240 bit
	// hash of the public key data divided into 12 groups like so:
	//    ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
	KeyID() string
	// Verify verifies the signature of the data in the io.Reader using this
	// Public Key. The alg parameter should identify the digital signature
	// algorithm which was used to produce the signature and should be
	// supported by this public key. Returns a nil error if the signature
	// is valid.
	Verify(data io.Reader, alg string, signature []byte) error
	// CryptoPublicKey returns the internal object which can be used as a
	// crypto.PublicKey for use with other standard library operations. The type
	// is either *rsa.PublicKey or *ecdsa.PublicKey
	CryptoPublicKey() crypto.PublicKey
	// These public keys can be serialized to the standard JSON encoding for
	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
	// Algorithms.
	MarshalJSON() ([]byte, error)
	// These keys can also be serialized to the standard PEM encoding.
	PEMBlock() (*pem.Block, error)
	// The string representation of a key is its key type and ID.
	String() string
	// AddExtendedField attaches an arbitrary named value to the key, and
	// GetExtendedField retrieves it (nil semantics for missing fields are
	// implementation-defined — see the concrete key types).
	AddExtendedField(string, interface{})
	GetExtendedField(string) interface{}
}
++++
// PrivateKey is a generic interface for a Private Key.
type PrivateKey interface {
	// A PrivateKey contains all fields and methods of a PublicKey of the
	// same type. The MarshalJSON method also outputs the private key as a
	// JSON Web Key, and the PEMBlock method outputs the private key as a
	// PEM block.
	PublicKey
	// PublicKey returns the PublicKey associated with this PrivateKey.
	PublicKey() PublicKey
	// Sign signs the data read from the io.Reader using a signature algorithm
	// supported by the private key. If the specified hashing algorithm is
	// supported by this key, that hash function is used to generate the
	// signature otherwise the default hashing algorithm for this key is
	// used. Returns the signature and identifier of the algorithm used.
	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
	// CryptoPrivateKey returns the internal object which can be used as a
	// crypto.PrivateKey for use with other standard library operations. The
	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
	CryptoPrivateKey() crypto.PrivateKey
}
++++
++++// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
++++// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
++++// key is of an unsupported type.
++++func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
++++ switch cryptoPublicKey := cryptoPublicKey.(type) {
++++ case *ecdsa.PublicKey:
++++ return fromECPublicKey(cryptoPublicKey)
++++ case *rsa.PublicKey:
++++ return fromRSAPublicKey(cryptoPublicKey), nil
++++ default:
++++ return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
++++ }
++++}
++++
++++// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
++++// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
++++// key is of an unsupported type.
++++func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
++++ switch cryptoPrivateKey := cryptoPrivateKey.(type) {
++++ case *ecdsa.PrivateKey:
++++ return fromECPrivateKey(cryptoPrivateKey)
++++ case *rsa.PrivateKey:
++++ return fromRSAPrivateKey(cryptoPrivateKey), nil
++++ default:
++++ return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
++++ }
++++}
++++
++++// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
++++// PublicKey or an error if there is a problem with the encoding.
++++func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
++++ pemBlock, _ := pem.Decode(data)
++++ if pemBlock == nil {
++++ return nil, errors.New("unable to find PEM encoded data")
++++ } else if pemBlock.Type != "PUBLIC KEY" {
++++ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
++++ }
++++
++++ return pubKeyFromPEMBlock(pemBlock)
++++}
++++
++++// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
++++// PEM blocks appended one after the other and returns a slice of PublicKey
++++// objects that it finds.
++++func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
++++ pubKeys := []PublicKey{}
++++
++++ for {
++++ var pemBlock *pem.Block
++++ pemBlock, data = pem.Decode(data)
++++ if pemBlock == nil {
++++ break
++++ } else if pemBlock.Type != "PUBLIC KEY" {
++++ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
++++ }
++++
++++ pubKey, err := pubKeyFromPEMBlock(pemBlock)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ pubKeys = append(pubKeys, pubKey)
++++ }
++++
++++ return pubKeys, nil
++++}
++++
++++// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
++++// PrivateKey or an error if there is a problem with the encoding.
++++func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
++++ pemBlock, _ := pem.Decode(data)
++++ if pemBlock == nil {
++++ return nil, errors.New("unable to find PEM encoded data")
++++ }
++++
++++ var key PrivateKey
++++
++++ switch {
++++ case pemBlock.Type == "RSA PRIVATE KEY":
++++ rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
++++ }
++++ key = fromRSAPrivateKey(rsaPrivateKey)
++++ case pemBlock.Type == "EC PRIVATE KEY":
++++ ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
++++ }
++++ key, err = fromECPrivateKey(ecPrivateKey)
++++ if err != nil {
++++ return nil, err
++++ }
++++ default:
++++ return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
++++ }
++++
++++ addPEMHeadersToKey(pemBlock, key.PublicKey())
++++
++++ return key, nil
++++}
++++
++++// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
++++// Public Key to be used with libtrust.
++++func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
++++ jwk := make(map[string]interface{})
++++
++++ err := json.Unmarshal(data, &jwk)
++++ if err != nil {
++++ return nil, fmt.Errorf(
++++ "decoding JWK Public Key JSON data: %s\n", err,
++++ )
++++ }
++++
++++ // Get the Key Type value.
++++ kty, err := stringFromMap(jwk, "kty")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK Public Key type: %s", err)
++++ }
++++
++++ switch {
++++ case kty == "EC":
++++ // Call out to unmarshal EC public key.
++++ return ecPublicKeyFromMap(jwk)
++++ case kty == "RSA":
++++ // Call out to unmarshal RSA public key.
++++ return rsaPublicKeyFromMap(jwk)
++++ default:
++++ return nil, fmt.Errorf(
++++ "JWK Public Key type not supported: %q\n", kty,
++++ )
++++ }
++++}
++++
++++// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
++++// and returns a slice of Public Key objects.
++++func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
++++ rawKeys, err := loadJSONKeySetRaw(data)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ pubKeys := make([]PublicKey, 0, len(rawKeys))
++++
++++ for _, rawKey := range rawKeys {
++++ pubKey, err := UnmarshalPublicKeyJWK(rawKey)
++++ if err != nil {
++++ return nil, err
++++ }
++++ pubKeys = append(pubKeys, pubKey)
++++ }
++++
++++ return pubKeys, nil
++++}
++++
++++// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
++++// Private Key to be used with libtrust.
++++func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
++++ jwk := make(map[string]interface{})
++++
++++ err := json.Unmarshal(data, &jwk)
++++ if err != nil {
++++ return nil, fmt.Errorf(
++++ "decoding JWK Private Key JSON data: %s\n", err,
++++ )
++++ }
++++
++++ // Get the Key Type value.
++++ kty, err := stringFromMap(jwk, "kty")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK Private Key type: %s", err)
++++ }
++++
++++ switch {
++++ case kty == "EC":
++++ // Call out to unmarshal EC private key.
++++ return ecPrivateKeyFromMap(jwk)
++++ case kty == "RSA":
++++ // Call out to unmarshal RSA private key.
++++ return rsaPrivateKeyFromMap(jwk)
++++ default:
++++ return nil, fmt.Errorf(
++++ "JWK Private Key type not supported: %q\n", kty,
++++ )
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "encoding/json"
++++ "encoding/pem"
++++ "errors"
++++ "fmt"
++++ "io/ioutil"
++++ "os"
++++ "strings"
++++)
++++
var (
	// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
	// Callers may compare errors returned by the load helpers against this
	// sentinel to distinguish "missing file" from real read failures.
	ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)
++++
++++func readKeyFileBytes(filename string) ([]byte, error) {
++++ data, err := ioutil.ReadFile(filename)
++++ if err != nil {
++++ if os.IsNotExist(err) {
++++ err = ErrKeyFileDoesNotExist
++++ } else {
++++ err = fmt.Errorf("unable to read key file %s: %s", filename, err)
++++ }
++++
++++ return nil, err
++++ }
++++
++++ return data, nil
++++}
++++
++++/*
++++ Loading and Saving of Public and Private Keys in either PEM or JWK format.
++++*/
++++
++++// LoadKeyFile opens the given filename and attempts to read a Private Key
++++// encoded in either PEM or JWK format (if .json or .jwk file extension).
++++func LoadKeyFile(filename string) (PrivateKey, error) {
++++ contents, err := readKeyFileBytes(filename)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ var key PrivateKey
++++
++++ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
++++ key, err = UnmarshalPrivateKeyJWK(contents)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
++++ }
++++ } else {
++++ key, err = UnmarshalPrivateKeyPEM(contents)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
++++ }
++++ }
++++
++++ return key, nil
++++}
++++
++++// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
++++// encoded in either PEM or JWK format (if .json or .jwk file extension).
++++func LoadPublicKeyFile(filename string) (PublicKey, error) {
++++ contents, err := readKeyFileBytes(filename)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ var key PublicKey
++++
++++ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
++++ key, err = UnmarshalPublicKeyJWK(contents)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
++++ }
++++ } else {
++++ key, err = UnmarshalPublicKeyPEM(contents)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
++++ }
++++ }
++++
++++ return key, nil
++++}
++++
++++// SaveKey saves the given key to a file using the provided filename.
++++// This process will overwrite any existing file at the provided location.
++++func SaveKey(filename string, key PrivateKey) error {
++++ var encodedKey []byte
++++ var err error
++++
++++ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
++++ // Encode in JSON Web Key format.
++++ encodedKey, err = json.MarshalIndent(key, "", " ")
++++ if err != nil {
++++ return fmt.Errorf("unable to encode private key JWK: %s", err)
++++ }
++++ } else {
++++ // Encode in PEM format.
++++ pemBlock, err := key.PEMBlock()
++++ if err != nil {
++++ return fmt.Errorf("unable to encode private key PEM: %s", err)
++++ }
++++ encodedKey = pem.EncodeToMemory(pemBlock)
++++ }
++++
++++ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
++++ if err != nil {
++++ return fmt.Errorf("unable to write private key file %s: %s", filename, err)
++++ }
++++
++++ return nil
++++}
++++
++++// SavePublicKey saves the given public key to the file.
++++func SavePublicKey(filename string, key PublicKey) error {
++++ var encodedKey []byte
++++ var err error
++++
++++ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
++++ // Encode in JSON Web Key format.
++++ encodedKey, err = json.MarshalIndent(key, "", " ")
++++ if err != nil {
++++ return fmt.Errorf("unable to encode public key JWK: %s", err)
++++ }
++++ } else {
++++ // Encode in PEM format.
++++ pemBlock, err := key.PEMBlock()
++++ if err != nil {
++++ return fmt.Errorf("unable to encode public key PEM: %s", err)
++++ }
++++ encodedKey = pem.EncodeToMemory(pemBlock)
++++ }
++++
++++ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
++++ if err != nil {
++++ return fmt.Errorf("unable to write public key file %s: %s", filename, err)
++++ }
++++
++++ return nil
++++}
++++
++++// Public Key Set files
++++
// jwkSet mirrors the JSON Web Key Set wire format: an object with a
// single "keys" array whose members are kept as raw JSON for later,
// per-key unmarshalling.
type jwkSet struct {
	Keys []json.RawMessage `json:"keys"`
}
++++
++++// LoadKeySetFile loads a key set
++++func LoadKeySetFile(filename string) ([]PublicKey, error) {
++++ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
++++ return loadJSONKeySetFile(filename)
++++ }
++++
++++ // Must be a PEM format file
++++ return loadPEMKeySetFile(filename)
++++}
++++
++++func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
++++ if len(data) == 0 {
++++ // This is okay, just return an empty slice.
++++ return []json.RawMessage{}, nil
++++ }
++++
++++ keySet := jwkSet{}
++++
++++ err := json.Unmarshal(data, &keySet)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
++++ }
++++
++++ return keySet.Keys, nil
++++}
++++
++++func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
++++ contents, err := readKeyFileBytes(filename)
++++ if err != nil && err != ErrKeyFileDoesNotExist {
++++ return nil, err
++++ }
++++
++++ return UnmarshalPublicKeyJWKSet(contents)
++++}
++++
++++func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
++++ data, err := readKeyFileBytes(filename)
++++ if err != nil && err != ErrKeyFileDoesNotExist {
++++ return nil, err
++++ }
++++
++++ return UnmarshalPublicKeyPEMBundle(data)
++++}
++++
++++// AddKeySetFile adds a key to a key set
++++func AddKeySetFile(filename string, key PublicKey) error {
++++ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
++++ return addKeySetJSONFile(filename, key)
++++ }
++++
++++ // Must be a PEM format file
++++ return addKeySetPEMFile(filename, key)
++++}
++++
// addKeySetJSONFile appends key to the JSON Web Key Set stored in
// filename, rewriting the whole file with the new key included.
func addKeySetJSONFile(filename string, key PublicKey) error {
	encodedKey, err := json.Marshal(key)
	if err != nil {
		return fmt.Errorf("unable to encode trusted client key: %s", err)
	}

	// A missing file is fine: we simply start from an empty key set.
	contents, err := readKeyFileBytes(filename)
	if err != nil && err != ErrKeyFileDoesNotExist {
		return err
	}

	rawEntries, err := loadJSONKeySetRaw(contents)
	if err != nil {
		return err
	}

	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
	entriesWrapper := jwkSet{Keys: rawEntries}

	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
	if err != nil {
		return fmt.Errorf("unable to encode trusted client keys: %s", err)
	}

	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
	if err != nil {
		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
	}

	return nil
}
++++
++++func addKeySetPEMFile(filename string, key PublicKey) error {
++++ // Encode to PEM, open file for appending, write PEM.
++++ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
++++ if err != nil {
++++ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
++++ }
++++ defer file.Close()
++++
++++ pemBlock, err := key.PEMBlock()
++++ if err != nil {
++++ return fmt.Errorf("unable to encoded trusted key: %s", err)
++++ }
++++
++++ _, err = file.Write(pem.EncodeToMemory(pemBlock))
++++ if err != nil {
++++ return fmt.Errorf("unable to write trusted keys file: %s", err)
++++ }
++++
++++ return nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "errors"
++++ "io/ioutil"
++++ "os"
++++ "testing"
++++)
++++
// makeTempFile creates (and immediately closes) an empty temporary file
// and returns its name; the caller is responsible for removing it.
func makeTempFile(t *testing.T, prefix string) (filename string) {
	tmp, err := ioutil.TempFile("", prefix)
	if err != nil {
		t.Fatal(err)
	}
	filename = tmp.Name()
	tmp.Close()
	return
}
++++
++++func TestKeyFiles(t *testing.T) {
++++ key, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ testKeyFiles(t, key)
++++
++++ key, err = GenerateRSA2048PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ testKeyFiles(t, key)
++++}
++++
++++func testKeyFiles(t *testing.T, key PrivateKey) {
++++ var err error
++++
++++ privateKeyFilename := makeTempFile(t, "private_key")
++++ privateKeyFilenamePEM := privateKeyFilename + ".pem"
++++ privateKeyFilenameJWK := privateKeyFilename + ".jwk"
++++
++++ publicKeyFilename := makeTempFile(t, "public_key")
++++ publicKeyFilenamePEM := publicKeyFilename + ".pem"
++++ publicKeyFilenameJWK := publicKeyFilename + ".jwk"
++++
++++ if err = SaveKey(privateKeyFilenamePEM, key); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if err = SaveKey(privateKeyFilenameJWK, key); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if key.KeyID() != loadedPEMKey.KeyID() {
++++ t.Fatal(errors.New("key IDs do not match"))
++++ }
++++
++++ if key.KeyID() != loadedJWKKey.KeyID() {
++++ t.Fatal(errors.New("key IDs do not match"))
++++ }
++++
++++ if key.KeyID() != loadedPEMPublicKey.KeyID() {
++++ t.Fatal(errors.New("key IDs do not match"))
++++ }
++++
++++ if key.KeyID() != loadedJWKPublicKey.KeyID() {
++++ t.Fatal(errors.New("key IDs do not match"))
++++ }
++++
++++ os.Remove(privateKeyFilename)
++++ os.Remove(privateKeyFilenamePEM)
++++ os.Remove(privateKeyFilenameJWK)
++++ os.Remove(publicKeyFilename)
++++ os.Remove(publicKeyFilenamePEM)
++++ os.Remove(publicKeyFilenameJWK)
++++}
++++
++++func TestTrustedHostKeysFile(t *testing.T) {
++++ trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys")
++++ trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem"
++++ trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json"
++++
++++ testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM)
++++ testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK)
++++
++++ os.Remove(trustedHostKeysFilename)
++++ os.Remove(trustedHostKeysFilenamePEM)
++++ os.Remove(trustedHostKeysFilenameJWK)
++++}
++++
// testTrustedHostKeysFile exercises AddKeySetFile/LoadKeySetFile against
// the given key set file, adding one EC and one RSA host key and
// reloading the set after each addition.
func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) {
	hostAddress1 := "docker.example.com:2376"
	hostKey1, err := GenerateECP256PrivateKey()
	if err != nil {
		t.Fatal(err)
	}

	// Record which host this key belongs to via an extended JWK field.
	hostKey1.AddExtendedField("hosts", []string{hostAddress1})
	err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey())
	if err != nil {
		t.Fatal(err)
	}

	trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE(review): LoadKeySetFile returns a slice, so addr here is the
	// numeric index rather than a host address — logging only.
	for addr, hostKey := range trustedHostKeysMapping {
		t.Logf("Host Address: %s\n", addr)
		t.Logf("Host Key: %s\n\n", hostKey)
	}

	hostAddress2 := "192.168.59.103:2376"
	hostKey2, err := GenerateRSA2048PrivateKey()
	if err != nil {
		t.Fatal(err)
	}

	// The second key stores the host as a bare string (not a slice).
	hostKey2.AddExtendedField("hosts", hostAddress2)
	err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey())
	if err != nil {
		t.Fatal(err)
	}

	trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename)
	if err != nil {
		t.Fatal(err)
	}

	for addr, hostKey := range trustedHostKeysMapping {
		t.Logf("Host Address: %s\n", addr)
		t.Logf("Host Key: %s\n\n", hostKey)
	}

}
++++
++++func TestTrustedClientKeysFile(t *testing.T) {
++++ trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys")
++++ trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem"
++++ trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json"
++++
++++ testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM)
++++ testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK)
++++
++++ os.Remove(trustedClientKeysFilename)
++++ os.Remove(trustedClientKeysFilenamePEM)
++++ os.Remove(trustedClientKeysFilenameJWK)
++++}
++++
++++func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) {
++++ clientKey1, err := GenerateECP256PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey())
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ for _, clientKey := range trustedClientKeys {
++++ t.Logf("Client Key: %s\n", clientKey)
++++ }
++++
++++ clientKey2, err := GenerateRSA2048PrivateKey()
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey())
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ for _, clientKey := range trustedClientKeys {
++++ t.Logf("Client Key: %s\n", clientKey)
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "crypto"
++++ "crypto/rand"
++++ "crypto/rsa"
++++ "crypto/x509"
++++ "encoding/json"
++++ "encoding/pem"
++++ "errors"
++++ "fmt"
++++ "io"
++++ "math/big"
++++)
++++
/*
 * RSA PUBLIC KEY (RSA-based digital signature algorithms: RS256/RS384/RS512)
 */
++++
// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
	// Embedded standard library RSA public key; provides N (modulus)
	// and E (public exponent).
	*rsa.PublicKey
	// extended holds arbitrary additional JWK members (e.g. "hosts",
	// "comment") carried alongside the standard key parameters and
	// included in both the JSON and PEM serializations.
	extended map[string]interface{}
}
++++
++++func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
++++ return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
++++}
++++
// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
// This is the value stored in the "kty" member of the JWK serialization.
func (k *rsaPublicKey) KeyType() string {
	return "RSA"
}
++++
++++// KeyID returns a distinct identifier which is unique to this Public Key.
++++func (k *rsaPublicKey) KeyID() string {
++++ // Generate and return a 'libtrust' fingerprint of the RSA public key.
++++ // For an RSA key this should be:
++++ // SHA256("RSA"+bytes(N)+bytes(E))
++++ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
++++ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
++++ hasher := crypto.SHA256.New()
++++ hasher.Write([]byte(k.KeyType()))
++++ hasher.Write(k.N.Bytes())
++++ hasher.Write(serializeRSAPublicExponentParam(k.E))
++++ return keyIDEncode(hasher.Sum(nil)[:30])
++++}
++++
++++func (k *rsaPublicKey) String() string {
++++ return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
++++}
++++
++++// Verify verifyies the signature of the data in the io.Reader using this Public Key.
++++// The alg parameter should be the name of the JWA digital signature algorithm
++++// which was used to produce the signature and should be supported by this
++++// public key. Returns a nil error if the signature is valid.
++++func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
++++ // Verify the signature of the given date, return non-nil error if valid.
++++ sigAlg, err := rsaSignatureAlgorithmByName(alg)
++++ if err != nil {
++++ return fmt.Errorf("unable to verify Signature: %s", err)
++++ }
++++
++++ hasher := sigAlg.HashID().New()
++++ _, err = io.Copy(hasher, data)
++++ if err != nil {
++++ return fmt.Errorf("error reading data to sign: %s", err)
++++ }
++++ hash := hasher.Sum(nil)
++++
++++ err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
++++ if err != nil {
++++ return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
++++ }
++++
++++ return nil
++++}
++++
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. For
// this implementation the concrete type is always *rsa.PublicKey.
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
	return k.PublicKey
}
++++
++++func (k *rsaPublicKey) toMap() map[string]interface{} {
++++ jwk := make(map[string]interface{})
++++ for k, v := range k.extended {
++++ jwk[k] = v
++++ }
++++ jwk["kty"] = k.KeyType()
++++ jwk["kid"] = k.KeyID()
++++ jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
++++ jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
++++
++++ return jwk
++++}
++++
++++// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
++++// RSA keys.
++++func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
++++ return json.Marshal(k.toMap())
++++}
++++
// PEMBlock serializes this Public Key to DER-encoded PKIX format wrapped
// in a "PUBLIC KEY" PEM block. The extended fields (plus a "keyID" entry)
// are passed to createPemBlock — presumably emitted as PEM headers; see
// that helper for the exact treatment.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
	}
	k.extended["keyID"] = k.KeyID() // For display purposes.
	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
++++
// AddExtendedField sets an arbitrary additional JWK member on this key.
// Extended fields are included in both the JSON (toMap/MarshalJSON) and
// PEM (PEMBlock) serializations.
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
	k.extended[field] = value
}
++++
++++func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
++++ v, ok := k.extended[field]
++++ if !ok {
++++ return nil
++++ }
++++ return v
++++}
++++
// rsaPublicKeyFromMap parses a JWK JSON object into an rsaPublicKey.
// The caller has already determined the JWK key type (kty) to be "RSA".
// Required members are 'n' (modulus) and 'e' (public exponent); 'kid' is
// optional but, when present, must match the fingerprint computed from
// 'n' and 'e'. Any members left in the map afterwards are kept as the
// key's extended fields.
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
	// JWK key type (kty) has already been determined to be "RSA".
	// Need to extract 'n', 'e', and 'kid' and check for
	// consistency.

	// Get the modulus parameter N.
	nB64Url, err := stringFromMap(jwk, "n")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	n, err := parseRSAModulusParam(nB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	// Get the public exponent E.
	eB64Url, err := stringFromMap(jwk, "e")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	e, err := parseRSAPublicExponentParam(eB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	key := &rsaPublicKey{
		PublicKey: &rsa.PublicKey{N: n, E: e},
	}

	// Key ID is optional, but if it exists, it should match the key.
	_, ok := jwk["kid"]
	if ok {
		kid, err := stringFromMap(jwk, "kid")
		if err != nil {
			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
		}
		if kid != key.KeyID() {
			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
		}
	}

	// Reject objects that still carry the private exponent: this
	// constructor is strictly for public keys.
	if _, ok := jwk["d"]; ok {
		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
	}

	// Whatever members remain in the map become this key's extended fields.
	key.extended = jwk

	return key, nil
}
++++
/*
 * RSA PRIVATE KEY (RSA-based digital signature algorithms: RS256/RS384/RS512)
 */
++++
// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
	// Embedded public half; provides KeyType, KeyID, Verify, extended
	// fields, and the public JWK/PEM serializations.
	rsaPublicKey
	// Embedded standard library RSA private key used for signing.
	*rsa.PrivateKey
}
++++
++++func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
++++ return &rsaPrivateKey{
++++ *fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
++++ cryptoPrivateKey,
++++ }
++++}
++++
// PublicKey returns the Public Key data associated with this Private Key.
// The returned value points at the embedded public half, so it shares the
// same extended-field map as this private key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
	return &k.rsaPublicKey
}
++++
++++func (k *rsaPrivateKey) String() string {
++++ return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
++++}
++++
++++// Sign signs the data read from the io.Reader using a signature algorithm supported
++++// by the RSA private key. If the specified hashing algorithm is supported by
++++// this key, that hash function is used to generate the signature otherwise the
++++// the default hashing algorithm for this key is used. Returns the signature
++++// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
++++// "RS512".
++++func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
++++ // Generate a signature of the data using the internal alg.
++++ sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
++++ hasher := sigAlg.HashID().New()
++++
++++ _, err = io.Copy(hasher, data)
++++ if err != nil {
++++ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
++++ }
++++ hash := hasher.Sum(nil)
++++
++++ signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
++++ if err != nil {
++++ return nil, "", fmt.Errorf("error producing signature: %s", err)
++++ }
++++
++++ alg = sigAlg.HeaderParam()
++++
++++ return
++++}
++++
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. For
// this implementation the concrete type is always *rsa.PrivateKey.
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
	return k.PrivateKey
}
++++
// toMap returns the JWK JSON members for this private key: the public
// members from rsaPublicKey.toMap plus the private parameters 'd', 'p',
// 'q', the CRT speed-up values 'dp', 'dq', 'qi', and — for multi-prime
// keys — the optional 'oth' array.
func (k *rsaPrivateKey) toMap() map[string]interface{} {
	k.Precompute() // Make sure the precomputed values are stored.
	jwk := k.rsaPublicKey.toMap()

	// Private exponent and the two primary prime factors.
	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
	// Chinese Remainder Theorem parameters (signing optimization).
	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())

	// Any primes beyond the first two belong in the "oth" member.
	otherPrimes := k.Primes[2:]

	if len(otherPrimes) > 0 {
		otherPrimesInfo := make([]interface{}, len(otherPrimes))
		for i, r := range otherPrimes {
			// Each entry carries the factor 'r', its CRT exponent 'd',
			// and its CRT coefficient 't'.
			otherPrimeInfo := make(map[string]string, 3)
			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
			crtVal := k.Precomputed.CRTValues[i]
			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
			otherPrimesInfo[i] = otherPrimeInfo
		}
		jwk["oth"] = otherPrimesInfo
	}

	return jwk
}
++++
++++// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
++++// RSA keys.
++++func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
++++ return json.Marshal(k.toMap())
++++}
++++
// PEMBlock serializes this Private Key to a DER-encoded PKCS#1 "RSA
// PRIVATE KEY" PEM block. (The previous comment said PKIX, but
// MarshalPKCS1PrivateKey produces PKCS#1; PKIX is the public-key format.)
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
	k.extended["keyID"] = k.KeyID() // For display purposes.
	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}
++++
++++func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
++++ // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
++++ // only the private key exponent 'd' is REQUIRED, the others are just for
++++ // signature/decryption optimizations and SHOULD be included when the JWK
++++ // is produced. We MAY choose to accept a JWK which only includes 'd', but
++++ // we're going to go ahead and not choose to accept it without the extra
++++ // fields. Only the 'oth' field will be optional (for multi-prime keys).
++++ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
++++ }
++++ firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
++++ }
++++ secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
++++ }
++++ firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
++++ }
++++ secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
++++ }
++++ crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
++++ }
++++
++++ var oth interface{}
++++ if _, ok := jwk["oth"]; ok {
++++ oth = jwk["oth"]
++++ delete(jwk, "oth")
++++ }
++++
++++ // JWK key type (kty) has already been determined to be "RSA".
++++ // Need to extract the public key information, then extract the private
++++ // key values.
++++ publicKey, err := rsaPublicKeyFromMap(jwk)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ privateKey := &rsa.PrivateKey{
++++ PublicKey: *publicKey.PublicKey,
++++ D: privateExponent,
++++ Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
++++ Precomputed: rsa.PrecomputedValues{
++++ Dp: firstFactorCRT,
++++ Dq: secondFactorCRT,
++++ Qinv: crtCoeff,
++++ },
++++ }
++++
++++ if oth != nil {
++++ // Should be an array of more JSON objects.
++++ otherPrimesInfo, ok := oth.([]interface{})
++++ if !ok {
++++ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
++++ }
++++ numOtherPrimeFactors := len(otherPrimesInfo)
++++ if numOtherPrimeFactors == 0 {
++++ return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
++++ }
++++ otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
++++ productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
++++ crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
++++
++++ for i, val := range otherPrimesInfo {
++++ otherPrimeinfo, ok := val.(map[string]interface{})
++++ if !ok {
++++ return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
++++ }
++++
++++ otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
++++ }
++++ otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
++++ }
++++ otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
++++ if err != nil {
++++ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
++++ }
++++
++++ crtValue := crtValues[i]
++++ crtValue.Exp = otherFactorCRT
++++ crtValue.Coeff = otherCrtCoeff
++++ crtValue.R = productOfPrimes
++++ otherPrimeFactors[i] = otherPrimeFactor
++++ productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
++++ }
++++
++++ privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
++++ privateKey.Precomputed.CRTValues = crtValues
++++ }
++++
++++ key := &rsaPrivateKey{
++++ rsaPublicKey: *publicKey,
++++ PrivateKey: privateKey,
++++ }
++++
++++ return key, nil
++++}
++++
++++/*
++++ * Key Generation Functions.
++++ */
++++
++++func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
++++ k = new(rsaPrivateKey)
++++ k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
++++ k.extended = make(map[string]interface{})
++++
++++ return
++++}
++++
++++// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
++++func GenerateRSA2048PrivateKey() (PrivateKey, error) {
++++ k, err := generateRSAPrivateKey(2048)
++++ if err != nil {
++++ return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
++++ }
++++
++++ return k, nil
++++}
++++
++++// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
++++func GenerateRSA3072PrivateKey() (PrivateKey, error) {
++++ k, err := generateRSAPrivateKey(3072)
++++ if err != nil {
++++ return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
++++ }
++++
++++ return k, nil
++++}
++++
++++// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
++++func GenerateRSA4096PrivateKey() (PrivateKey, error) {
++++ k, err := generateRSAPrivateKey(4096)
++++ if err != nil {
++++ return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
++++ }
++++
++++ return k, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "bytes"
++++ "encoding/json"
++++ "log"
++++ "testing"
++++)
++++
// rsaKeys holds the shared RSA test keys (2048, 3072, and 4096 bits),
// generated once at package init so all tests in this file can reuse
// them instead of paying key-generation cost per test.
var rsaKeys []PrivateKey

func init() {
	var err error
	rsaKeys, err = generateRSATestKeys()
	if err != nil {
		// Abort the whole test binary: no test below can run without keys.
		log.Fatal(err)
	}
}
++++
++++func generateRSATestKeys() (keys []PrivateKey, err error) {
++++ log.Println("Generating RSA 2048-bit Test Key")
++++ rsa2048Key, err := GenerateRSA2048PrivateKey()
++++ if err != nil {
++++ return
++++ }
++++
++++ log.Println("Generating RSA 3072-bit Test Key")
++++ rsa3072Key, err := GenerateRSA3072PrivateKey()
++++ if err != nil {
++++ return
++++ }
++++
++++ log.Println("Generating RSA 4096-bit Test Key")
++++ rsa4096Key, err := GenerateRSA4096PrivateKey()
++++ if err != nil {
++++ return
++++ }
++++
++++ log.Println("Done generating RSA Test Keys!")
++++ keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key}
++++
++++ return
++++}
++++
++++func TestRSAKeys(t *testing.T) {
++++ for _, rsaKey := range rsaKeys {
++++ if rsaKey.KeyType() != "RSA" {
++++ t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType())
++++ }
++++ }
++++}
++++
++++func TestRSASignVerify(t *testing.T) {
++++ message := "Hello, World!"
++++ data := bytes.NewReader([]byte(message))
++++
++++ sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512}
++++
++++ for i, rsaKey := range rsaKeys {
++++ sigAlg := sigAlgs[i]
++++
++++ t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID())
++++
++++ data.Seek(0, 0) // Reset the byte reader
++++
++++ // Sign
++++ sig, alg, err := rsaKey.Sign(data, sigAlg.HashID())
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ data.Seek(0, 0) // Reset the byte reader
++++
++++ // Verify
++++ err = rsaKey.Verify(data, alg, sig)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++ }
++++}
++++
// TestMarshalUnmarshalRSAKeys round-trips each test key through the JWK
// JSON serialization (private and public halves), then checks that the
// unmarshalled private key can still sign, the unmarshalled public key
// can still verify, and the private key parameters survived intact.
func TestMarshalUnmarshalRSAKeys(t *testing.T) {
	data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test."))
	sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512}

	for i, rsaKey := range rsaKeys {
		sigAlg := sigAlgs[i]
		privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ")
		if err != nil {
			t.Fatal(err)
		}

		publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ")
		if err != nil {
			t.Fatal(err)
		}

		t.Logf("JWK Private Key: %s", string(privateJWKJSON))
		t.Logf("JWK Public Key: %s", string(publicJWKJSON))

		privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON)
		if err != nil {
			t.Fatal(err)
		}

		pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON)
		if err != nil {
			t.Fatal(err)
		}

		// Ensure we can sign/verify a message with the unmarshalled keys.
		data.Seek(0, 0) // Reset the byte reader
		signature, alg, err := privKey2.Sign(data, sigAlg.HashID())
		if err != nil {
			t.Fatal(err)
		}

		data.Seek(0, 0) // Reset the byte reader
		err = pubKey2.Verify(data, alg, signature)
		if err != nil {
			t.Fatal(err)
		}

		// It's a good idea to validate the Private Key to make sure our
		// (un)marshal process didn't corrupt the extra parameters.
		k := privKey2.(*rsaPrivateKey)
		err = k.PrivateKey.Validate()
		if err != nil {
			t.Fatal(err)
		}
	}
}
++++
++++func TestFromCryptoRSAKeys(t *testing.T) {
++++ for _, rsaKey := range rsaKeys {
++++ cryptoPrivateKey := rsaKey.CryptoPrivateKey()
++++ cryptoPublicKey := rsaKey.CryptoPublicKey()
++++
++++ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if pubKey.KeyID() != rsaKey.KeyID() {
++++ t.Fatal("public key key ID mismatch")
++++ }
++++
++++ privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
++++ if err != nil {
++++ t.Fatal(err)
++++ }
++++
++++ if privKey.KeyID() != rsaKey.KeyID() {
++++ t.Fatal("public key key ID mismatch")
++++ }
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package testutil
++++
++++import (
++++ "crypto"
++++ "crypto/rand"
++++ "crypto/x509"
++++ "crypto/x509/pkix"
++++ "math/big"
++++ "time"
++++)
++++
++++// GenerateTrustCA generates a new certificate authority for testing.
++++func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) {
++++ cert := &x509.Certificate{
++++ SerialNumber: big.NewInt(0),
++++ Subject: pkix.Name{
++++ CommonName: "CA Root",
++++ },
++++ NotBefore: time.Now().Add(-time.Second),
++++ NotAfter: time.Now().Add(time.Hour),
++++ IsCA: true,
++++ KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
++++ BasicConstraintsValid: true,
++++ }
++++
++++ certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ cert, err = x509.ParseCertificate(certDER)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ return cert, nil
++++}
++++
++++// GenerateIntermediate generates an intermediate certificate for testing using
++++// the parent certificate (likely a CA) and the provided keys.
++++func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
++++ cert := &x509.Certificate{
++++ SerialNumber: big.NewInt(0),
++++ Subject: pkix.Name{
++++ CommonName: "Intermediate",
++++ },
++++ NotBefore: time.Now().Add(-time.Second),
++++ NotAfter: time.Now().Add(time.Hour),
++++ IsCA: true,
++++ KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
++++ BasicConstraintsValid: true,
++++ }
++++
++++ certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ cert, err = x509.ParseCertificate(certDER)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ return cert, nil
++++}
++++
++++// GenerateTrustCert generates a new trust certificate for testing. Unlike the
++++// intermediate certificates, this certificate should be used for signature
++++// only, not creating certificates.
++++func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
++++ cert := &x509.Certificate{
++++ SerialNumber: big.NewInt(0),
++++ Subject: pkix.Name{
++++ CommonName: "Trust Cert",
++++ },
++++ NotBefore: time.Now().Add(-time.Second),
++++ NotAfter: time.Now().Add(time.Hour),
++++ IsCA: true,
++++ KeyUsage: x509.KeyUsageDigitalSignature,
++++ BasicConstraintsValid: true,
++++ }
++++
++++ certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ cert, err = x509.ParseCertificate(certDER)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ return cert, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++## Libtrust TLS Config Demo
++++
++++This program generates key pairs and trust files for a TLS client and server.
++++
++++To generate the keys, run:
++++
++++```
++++$ go run genkeys.go
++++```
++++
++++The generated files are:
++++
++++```
++++$ ls -l client_data/ server_data/
++++client_data/:
++++total 24
++++-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
++++-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
++++-rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json
++++
++++server_data/:
++++total 24
++++-rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json
++++-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
++++-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
++++```
++++
The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<HOSTNAME_OR_IP>:<PORT>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client.
++++
++++To start the server, run:
++++
++++```
++++$ go run server.go
++++```
++++
++++This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
++++
++++To make a request using the client, run:
++++
++++```
++++$ go run client.go
++++```
++++
++++This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
++++
++++The file `gencert.go` can be used to generate PEM encoded version of the client key and certificate. If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running).
++++
++++```
++++curl --cert cert.pem --key key.pem -k https://localhost:8888
++++```
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "crypto/tls"
++++ "fmt"
++++ "io/ioutil"
++++ "log"
++++ "net"
++++ "net/http"
++++
++++ "github.com/docker/libtrust"
++++)
++++
++++var (
++++ serverAddress = "localhost:8888"
++++ privateKeyFilename = "client_data/private_key.pem"
++++ trustedHostsFilename = "client_data/trusted_hosts.pem"
++++)
++++
// main loads the demo client's key, self-signs a client certificate,
// builds a CA pool from the trusted host keys, and makes an HTTPS GET
// request to the demo server, logging the response status and body.
func main() {
	// Load Client Key.
	clientKey, err := libtrust.LoadKeyFile(privateKeyFilename)
	if err != nil {
		log.Fatal(err)
	}

	// Generate Client Certificate.
	selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey)
	if err != nil {
		log.Fatal(err)
	}

	// Load trusted host keys.
	hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename)
	if err != nil {
		log.Fatal(err)
	}

	// Ensure the host we want to connect to is trusted!
	host, _, err := net.SplitHostPort(serverAddress)
	if err != nil {
		log.Fatal(err)
	}
	serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false)
	if err != nil {
		log.Fatalf("%q is not a known and trusted host", host)
	}

	// Generate a CA pool with the trusted host's key.
	caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys)
	if err != nil {
		log.Fatal(err)
	}

	// Create HTTP Client configured for mutual TLS: it presents the
	// self-signed client certificate and only trusts servers whose
	// certificates chain to the generated CA pool.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{
					tls.Certificate{
						Certificate: [][]byte{selfSignedClientCert.Raw},
						PrivateKey:  clientKey.CryptoPrivateKey(),
						Leaf:        selfSignedClientCert,
					},
				},
				RootCAs: caPool,
			},
		},
	}

	// makeRequest performs a GET, exits the process on any error, and
	// logs the response status line and full body.
	var makeRequest = func(url string) {
		resp, err := client.Get(url)
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()

		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatal(err)
		}

		log.Println(resp.Status)
		log.Println(string(body))
	}

	// Make the request to the trusted server!
	makeRequest(fmt.Sprintf("https://%s", serverAddress))
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "encoding/pem"
++++ "fmt"
++++ "log"
++++ "net"
++++
++++ "github.com/docker/libtrust"
++++)
++++
++++var (
++++ serverAddress = "localhost:8888"
++++ clientPrivateKeyFilename = "client_data/private_key.pem"
++++ trustedHostsFilename = "client_data/trusted_hosts.pem"
++++)
++++
// main prints a PEM-encoded bundle suitable for use with curl: the demo
// client's private key, a self-signed client certificate, and a CA
// certificate generated from the first server key trusted for this host.
func main() {
	key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename)
	if err != nil {
		log.Fatal(err)
	}

	keyPEMBlock, err := key.PEMBlock()
	if err != nil {
		log.Fatal(err)
	}

	encodedPrivKey := pem.EncodeToMemory(keyPEMBlock)
	fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey))

	cert, err := libtrust.GenerateSelfSignedClientCert(key)
	if err != nil {
		log.Fatal(err)
	}

	encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
	fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert))

	trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename)
	if err != nil {
		log.Fatal(err)
	}

	hostname, _, err := net.SplitHostPort(serverAddress)
	if err != nil {
		log.Fatal(err)
	}

	// Keep only the keys trusted for this server's hostname.
	trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false)
	if err != nil {
		log.Fatal(err)
	}

	// NOTE(review): assumes at least one trusted key matched the host;
	// trustedServerKeys[0] would panic on an empty result.
	caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0])
	if err != nil {
		log.Fatal(err)
	}

	encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw})
	fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert))
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "log"
++++
++++ "github.com/docker/libtrust"
++++)
++++
// main generates the key material for the TLS demo: an EC P-256 key pair
// each for the client and server (saved in PEM form), plus the server's
// trusted-clients key set and the client's trusted-hosts key set.
func main() {
	// Generate client key.
	clientKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Add a comment for the client key.
	clientKey.AddExtendedField("comment", "TLS Demo Client")

	// Save the client key, public and private versions.
	err = libtrust.SaveKey("client_data/private_key.pem", clientKey)
	if err != nil {
		log.Fatal(err)
	}

	err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey())
	if err != nil {
		log.Fatal(err)
	}

	// Generate server key.
	serverKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Set the list of addresses to use for the server.
	serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"})

	// Save the server key, public and private versions.
	err = libtrust.SaveKey("server_data/private_key.pem", serverKey)
	if err != nil {
		log.Fatal(err)
	}

	err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey())
	if err != nil {
		log.Fatal(err)
	}

	// Generate Authorized Keys file for server: the client's public key
	// is what the server will accept for client-certificate auth.
	err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey())
	if err != nil {
		log.Fatal(err)
	}

	// Generate Known Host Keys file for client: the server's public key
	// is what the client will require the server to present.
	err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
	if err != nil {
		log.Fatal(err)
	}
}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package main
++++
++++import (
++++ "crypto/tls"
++++ "fmt"
++++ "html"
++++ "log"
++++ "net"
++++ "net/http"
++++
++++ "github.com/docker/libtrust"
++++)
++++
// Demo server configuration: the listen address and the on-disk
// locations (relative to the working directory) of the server's
// private key and the trusted client key set, as produced by the
// companion key-generation example.
var (
	serverAddress             = "localhost:8888"
	privateKeyFilename        = "server_data/private_key.pem"
	authorizedClientsFilename = "server_data/trusted_clients.pem"
)
++++
++++func requestHandler(w http.ResponseWriter, r *http.Request) {
++++ clientCert := r.TLS.PeerCertificates[0]
++++ keyID := clientCert.Subject.CommonName
++++ log.Printf("Request from keyID: %s\n", keyID)
++++ fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
++++}
++++
++++func main() {
++++ // Load server key.
++++ serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ // Generate server certificate.
++++ selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
++++ serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
++++ )
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ // Load authorized client keys.
++++ authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ // Create CA pool using trusted client keys.
++++ caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++
++++ // Create TLS config, requiring client certificates.
++++ tlsConfig := &tls.Config{
++++ Certificates: []tls.Certificate{
++++ tls.Certificate{
++++ Certificate: [][]byte{selfSignedServerCert.Raw},
++++ PrivateKey: serverKey.CryptoPrivateKey(),
++++ Leaf: selfSignedServerCert,
++++ },
++++ },
++++ ClientAuth: tls.RequireAndVerifyClientCert,
++++ ClientCAs: caPool,
++++ }
++++
++++ // Create HTTP server with simple request handler.
++++ server := &http.Server{
++++ Addr: serverAddress,
++++ Handler: http.HandlerFunc(requestHandler),
++++ }
++++
++++ // Listen and server HTTPS using the libtrust TLS config.
++++ listener, err := net.Listen("tcp", server.Addr)
++++ if err != nil {
++++ log.Fatal(err)
++++ }
++++ tlsListener := tls.NewListener(listener, tlsConfig)
++++ server.Serve(tlsListener)
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package trustgraph
++++
++++import "github.com/docker/libtrust"
++++
// TrustGraph represents a graph of authorization mapping
// public keys to nodes and grants between nodes.
type TrustGraph interface {
	// Verify verifies that the given public key is allowed to perform
	// the given action (a permission bit map, see the constants at the
	// end of this file) on the given node according to the trust
	// graph.
	Verify(libtrust.PublicKey, string, uint16) (bool, error)

	// GetGrants returns an array of all grant chains which are used to
	// allow the requested permission.
	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
}
++++
// Grant represents a transfer of permission from one part of the
// trust graph to another. This is the only way to delegate
// permission between two different sub trees in the graph.
type Grant struct {
	// Subject is the namespace being granted
	Subject string

	// Permission is a bit map of permissions (see the permission
	// values documented at the end of this file)
	Permission uint16

	// Grantee represents the node being granted
	// a permission scope. The grantee can be
	// either a namespace item or a key id where namespace
	// items will always start with a '/'.
	Grantee string

	// statement represents the statement used to create
	// this object.
	statement *Statement
}
++++
++++// Permissions
++++// Read node 0x01 (can read node, no sub nodes)
++++// Write node 0x02 (can write to node object, cannot create subnodes)
++++// Read subtree 0x04 (delegates read to each sub node)
// Write subtree 0x08 (delegates write to each sub node, and includes create on the subject)
++++//
++++// Permission shortcuts
++++// ReadItem = 0x01
++++// WriteItem = 0x03
++++// ReadAccess = 0x07
++++// WriteAccess = 0x0F
++++// Delegate = 0x0F
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package trustgraph
++++
++++import (
++++ "strings"
++++
++++ "github.com/docker/libtrust"
++++)
++++
// grantNode is one node of the grant tree: one node per path segment
// of a grantee name, holding the grants attached at that segment and
// the child nodes for deeper segments.
type grantNode struct {
	grants   []*Grant
	children map[string]*grantNode
}
++++
// memoryGraph is an in-memory TrustGraph backed by a tree of
// grantNodes keyed by path segment (or by key ID at the root).
type memoryGraph struct {
	roots map[string]*grantNode
}
++++
++++func newGrantNode() *grantNode {
++++ return &grantNode{
++++ grants: []*Grant{},
++++ children: map[string]*grantNode{},
++++ }
++++}
++++
++++// NewMemoryGraph returns a new in memory trust graph created from
++++// a static list of grants. This graph is immutable after creation
++++// and any alterations should create a new instance.
++++func NewMemoryGraph(grants []*Grant) TrustGraph {
++++ roots := map[string]*grantNode{}
++++ for _, grant := range grants {
++++ parts := strings.Split(grant.Grantee, "/")
++++ nodes := roots
++++ var node *grantNode
++++ var nodeOk bool
++++ for _, part := range parts {
++++ node, nodeOk = nodes[part]
++++ if !nodeOk {
++++ node = newGrantNode()
++++ nodes[part] = node
++++ }
++++ if part != "" {
++++ node.grants = append(node.grants, grant)
++++ }
++++ nodes = node.children
++++ }
++++ }
++++ return &memoryGraph{roots}
++++}
++++
++++func (g *memoryGraph) getGrants(name string) []*Grant {
++++ nameParts := strings.Split(name, "/")
++++ nodes := g.roots
++++ var node *grantNode
++++ var nodeOk bool
++++ for _, part := range nameParts {
++++ node, nodeOk = nodes[part]
++++ if !nodeOk {
++++ return nil
++++ }
++++ nodes = node.children
++++ }
++++ return node.grants
++++}
++++
// isSubName reports whether name equals sub or lies strictly below it
// in the '/'-separated namespace (so "/a" covers "/a" and "/a/b" but
// not the sibling "/ab").
func isSubName(name, sub string) bool {
	if !strings.HasPrefix(name, sub) {
		return false
	}
	return len(name) == len(sub) || name[len(sub)] == '/'
}
++++
// walkFunc is the visitor called for each grant that satisfies a walk;
// it receives the grant and the delegation chain leading to it, and
// returning true stops the walk.
type walkFunc func(*Grant, []*Grant) bool

// foundWalkFunc stops the walk at the first satisfying grant.
func foundWalkFunc(*Grant, []*Grant) bool {
	return true
}
++++
// walkGrants performs a depth-first search over the grant tree starting
// at the node named by start. A grant whose permission bits include the
// requested permission either satisfies the walk directly (its Subject
// covers target, so f is invoked with the grant and the current chain —
// returning true ends the walk) or is followed as a delegation, with
// its Subject becoming the next start. The visited map is shared across
// the whole recursion to break delegation cycles; chain copies are only
// materialized when collect is true, since the non-collecting callers
// pass a nil chain throughout.
func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool {
	if visited == nil {
		visited = map[*Grant]bool{}
	}
	grants := g.getGrants(start)
	subGrants := make([]*Grant, 0, len(grants))
	for _, grant := range grants {
		if visited[grant] {
			continue
		}
		visited[grant] = true
		if grant.Permission&permission == permission {
			if isSubName(target, grant.Subject) {
				if f(grant, chain) {
					return true
				}
			} else {
				subGrants = append(subGrants, grant)
			}
		}
	}
	for _, grant := range subGrants {
		var chainCopy []*Grant
		if collect {
			// Copy-on-extend so sibling branches never share a chain's
			// backing array.
			chainCopy = make([]*Grant, len(chain)+1)
			copy(chainCopy, chain)
			chainCopy[len(chainCopy)-1] = grant
		} else {
			chainCopy = nil
		}

		if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) {
			return true
		}
	}
	return false
}
++++
++++func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) {
++++ return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil
++++}
++++
++++func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) {
++++ grants := [][]*Grant{}
++++ collect := func(grant *Grant, chain []*Grant) bool {
++++ grantChain := make([]*Grant, len(chain)+1)
++++ copy(grantChain, chain)
++++ grantChain[len(grantChain)-1] = grant
++++ grants = append(grants, grantChain)
++++ return false
++++ }
++++ g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true)
++++ return grants, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package trustgraph
++++
++++import (
++++ "fmt"
++++ "testing"
++++
++++ "github.com/docker/libtrust"
++++)
++++
++++func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) {
++++ grants := make([]*Grant, count)
++++ keys := make([]libtrust.PrivateKey, count)
++++ for i := 0; i < count; i++ {
++++ pk, err := libtrust.GenerateECP256PrivateKey()
++++ if err != nil {
++++ panic(err)
++++ }
++++ grant := &Grant{
++++ Subject: fmt.Sprintf("/user-%d", i+1),
++++ Permission: 0x0f,
++++ Grantee: pk.KeyID(),
++++ }
++++ keys[i] = pk
++++ grants[i] = grant
++++ }
++++ return grants, keys
++++}
++++
++++func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) {
++++ if ok, err := g.Verify(k, target, permission); err != nil {
++++ t.Fatalf("Unexpected error during verification: %s", err)
++++ } else if !ok {
++++ t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target)
++++ }
++++}
++++
++++func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) {
++++ if ok, err := g.Verify(k, target, permission); err != nil {
++++ t.Fatalf("Unexpected error during verification: %s", err)
++++ } else if ok {
++++ t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target)
++++ }
++++}
++++
// TestVerify exercises direct key grants plus one- and two-level
// delegations through namespace grantees.
func TestVerify(t *testing.T) {
	grants, keys := createTestKeysAndGrants(4)
	extraGrants := make([]*Grant, 3)
	// /user-2 is delegated full control of /user-3.
	extraGrants[0] = &Grant{
		Subject:    "/user-3",
		Permission: 0x0f,
		Grantee:    "/user-2",
	}
	// /user-4 is delegated full control of a sub-project of /user-3.
	extraGrants[1] = &Grant{
		Subject:    "/user-3/sub-project",
		Permission: 0x0f,
		Grantee:    "/user-4",
	}
	// /user-1 is delegated only 0x07 on /user-4.
	extraGrants[2] = &Grant{
		Subject:    "/user-4",
		Permission: 0x07,
		Grantee:    "/user-1",
	}
	grants = append(grants, extraGrants...)

	g := NewMemoryGraph(grants)

	// Permissions that should be granted directly or via delegation.
	testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
	testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f)
	testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07)
	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f)
	testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f)
	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f)
	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f)
	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f)
	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f)
	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f)

	// Requests with no grant, or asking for more bits than delegated.
	testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f)
	testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f)
	testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f)
	testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f)
	testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f)
	testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f)
	testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f)
}
++++
++++func TestCircularWalk(t *testing.T) {
++++ grants, keys := createTestKeysAndGrants(3)
++++ user1Grant := &Grant{
++++ Subject: "/user-2",
++++ Permission: 0x0f,
++++ Grantee: "/user-1",
++++ }
++++ user2Grant := &Grant{
++++ Subject: "/user-1",
++++ Permission: 0x0f,
++++ Grantee: "/user-2",
++++ }
++++ grants = append(grants, user1Grant, user2Grant)
++++
++++ g := NewMemoryGraph(grants)
++++
++++ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
++++ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f)
++++ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
++++ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f)
++++ testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f)
++++
++++ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f)
++++ testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
++++}
++++
// assertGrantSame fails the test unless actual and expected are the
// same *Grant — pointer identity, not structural equality.
func assertGrantSame(t *testing.T, actual, expected *Grant) {
	if actual != expected {
		t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual)
	}
}
++++
++++func TestGetGrants(t *testing.T) {
++++ grants, keys := createTestKeysAndGrants(5)
++++ extraGrants := make([]*Grant, 4)
++++ extraGrants[0] = &Grant{
++++ Subject: "/user-3/friend-project",
++++ Permission: 0x0f,
++++ Grantee: "/user-2/friends",
++++ }
++++ extraGrants[1] = &Grant{
++++ Subject: "/user-3/sub-project",
++++ Permission: 0x0f,
++++ Grantee: "/user-4",
++++ }
++++ extraGrants[2] = &Grant{
++++ Subject: "/user-2/friends",
++++ Permission: 0x0f,
++++ Grantee: "/user-5/fun-project",
++++ }
++++ extraGrants[3] = &Grant{
++++ Subject: "/user-5/fun-project",
++++ Permission: 0x0f,
++++ Grantee: "/user-1",
++++ }
++++ grants = append(grants, extraGrants...)
++++
++++ g := NewMemoryGraph(grants)
++++
++++ grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f)
++++ if err != nil {
++++ t.Fatalf("Error getting grants: %s", err)
++++ }
++++ if len(grantChains) != 1 {
++++ t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains))
++++ }
++++ if len(grantChains[0]) != 2 {
++++ t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0]))
++++ }
++++ assertGrantSame(t, grantChains[0][0], grants[3])
++++ assertGrantSame(t, grantChains[0][1], extraGrants[1])
++++
++++ grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f)
++++ if err != nil {
++++ t.Fatalf("Error getting grants: %s", err)
++++ }
++++ if len(grantChains) != 1 {
++++ t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains))
++++ }
++++ if len(grantChains[0]) != 4 {
++++ t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0]))
++++ }
++++ assertGrantSame(t, grantChains[0][0], grants[0])
++++ assertGrantSame(t, grantChains[0][1], extraGrants[3])
++++ assertGrantSame(t, grantChains[0][2], extraGrants[2])
++++ assertGrantSame(t, grantChains[0][3], extraGrants[0])
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package trustgraph
++++
++++import (
++++ "crypto/x509"
++++ "encoding/json"
++++ "io"
++++ "io/ioutil"
++++ "sort"
++++ "strings"
++++ "time"
++++
++++ "github.com/docker/libtrust"
++++)
++++
// jsonGrant is the JSON wire form of a grant within a statement.
type jsonGrant struct {
	Subject    string `json:"subject"`
	Permission uint16 `json:"permission"`
	Grantee    string `json:"grantee"`
}
++++
// jsonRevocation is the JSON wire form of a revocation within a
// statement; Revocation holds the permission bits to remove.
type jsonRevocation struct {
	Subject    string `json:"subject"`
	Revocation uint16 `json:"revocation"`
	Grantee    string `json:"grantee"`
}
++++
// jsonStatement is the JSON payload of a signed statement: the grants
// and revocations it makes, plus its issue and expiration times.
type jsonStatement struct {
	Revocations []*jsonRevocation `json:"revocations"`
	Grants      []*jsonGrant      `json:"grants"`
	Expiration  time.Time         `json:"expiration"`
	IssuedAt    time.Time         `json:"issuedAt"`
}
++++
++++func (g *jsonGrant) Grant(statement *Statement) *Grant {
++++ return &Grant{
++++ Subject: g.Subject,
++++ Permission: g.Permission,
++++ Grantee: g.Grantee,
++++ statement: statement,
++++ }
++++}
++++
// Statement represents a set of grants made from a verifiable
// authority. A statement has an expiration associated with it
// set by the authority.
type Statement struct {
	// jsonStatement holds the parsed payload; its fields (Grants,
	// Revocations, Expiration, IssuedAt) are promoted onto Statement.
	jsonStatement

	// signature is the JSON signature this statement was signed with
	// or loaded from.
	signature *libtrust.JSONSignature
}
++++
++++// IsExpired returns whether the statement has expired
++++func (s *Statement) IsExpired() bool {
++++ return s.Expiration.Before(time.Now().Add(-10 * time.Second))
++++}
++++
// Bytes returns an indented json representation of the statement
// in a byte array. This value can be written to a file or stream
// without alteration. The serialization is produced from the
// statement's signature, so it includes the "signatures" element.
func (s *Statement) Bytes() ([]byte, error) {
	return s.signature.PrettySignature("signatures")
}
++++
++++// LoadStatement loads and verifies a statement from an input stream.
++++func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
++++ b, err := ioutil.ReadAll(r)
++++ if err != nil {
++++ return nil, err
++++ }
++++ js, err := libtrust.ParsePrettySignature(b, "signatures")
++++ if err != nil {
++++ return nil, err
++++ }
++++ payload, err := js.Payload()
++++ if err != nil {
++++ return nil, err
++++ }
++++ var statement Statement
++++ err = json.Unmarshal(payload, &statement.jsonStatement)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ if authority == nil {
++++ _, err = js.Verify()
++++ if err != nil {
++++ return nil, err
++++ }
++++ } else {
++++ _, err = js.VerifyChains(authority)
++++ if err != nil {
++++ return nil, err
++++ }
++++ }
++++ statement.signature = js
++++
++++ return &statement, nil
++++}
++++
++++// CreateStatements creates and signs a statement from a stream of grants
++++// and revocations in a JSON array.
++++func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
++++ var statement Statement
++++ err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
++++ if err != nil {
++++ return nil, err
++++ }
++++ err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
++++ if err != nil {
++++ return nil, err
++++ }
++++ statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
++++ statement.jsonStatement.IssuedAt = time.Now().UTC()
++++
++++ b, err := json.MarshalIndent(&statement.jsonStatement, "", " ")
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ statement.signature, err = libtrust.NewJSONSignature(b)
++++ if err != nil {
++++ return nil, err
++++ }
++++ err = statement.signature.SignWithChain(key, chain)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ return &statement, nil
++++}
++++
// statementList implements sort.Interface over statements, ordering
// them by issue time (oldest first).
type statementList []*Statement

// Len returns the number of statements.
func (s statementList) Len() int {
	return len(s)
}

// Less orders statements by IssuedAt, oldest first.
func (s statementList) Less(i, j int) bool {
	return s[i].IssuedAt.Before(s[j].IssuedAt)
}

// Swap exchanges the statements at positions i and j.
func (s statementList) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
++++
// CollapseStatements returns a single list of the valid statements as well as the
// time when the next grant will expire. Statements are applied in
// IssuedAt order (oldest first) so later revocations act on earlier
// grants; when useExpired is false, expired statements are skipped.
func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) {
	sorted := make(statementList, 0, len(statements))
	for _, statement := range statements {
		if useExpired || !statement.IsExpired() {
			sorted = append(sorted, statement)
		}
	}
	sort.Sort(sorted)

	var minExpired time.Time
	var grantCount int
	roots := map[string]*grantNode{}
	for i, statement := range sorted {
		// Track the earliest expiration among the statements used.
		if statement.Expiration.Before(minExpired) || i == 0 {
			minExpired = statement.Expiration
		}
		for _, grant := range statement.Grants {
			parts := strings.Split(grant.Grantee, "/")
			nodes := roots
			g := grant.Grant(statement)
			grantCount = grantCount + 1

			// Attach the grant at every node along the grantee path —
			// including the "" node created by a leading '/', so the ""
			// root accumulates all namespace-granted grants.
			for _, part := range parts {
				node, nodeOk := nodes[part]
				if !nodeOk {
					node = newGrantNode()
					nodes[part] = node
				}
				node.grants = append(node.grants, g)
				nodes = node.children
			}
		}

		for _, revocation := range statement.Revocations {
			parts := strings.Split(revocation.Grantee, "/")
			nodes := roots

			// Descend toward the revocation's grantee node.
			var node *grantNode
			var nodeOk bool
			for _, part := range parts {
				node, nodeOk = nodes[part]
				if !nodeOk {
					break
				}
				nodes = node.children
			}
			// NOTE(review): when the grantee path is only partially
			// present, node is left at the deepest existing ancestor and
			// the revocation is matched against that ancestor's grants
			// rather than skipped — confirm this is intended.
			if node != nil {
				for _, grant := range node.grants {
					if isSubName(grant.Subject, revocation.Subject) {
						// Clear the revoked permission bits.
						grant.Permission = grant.Permission &^ revocation.Revocation
					}
				}
			}
		}
	}

	// Only root-level grant lists are returned; every grant appears at
	// a root (the "" node for '/'-grantees, its own key-ID node otherwise).
	retGrants := make([]*Grant, 0, grantCount)
	for _, rootNodes := range roots {
		retGrants = append(retGrants, rootNodes.grants...)
	}

	return retGrants, minExpired, nil
}
++++
++++// FilterStatements filters the statements to statements including the given grants.
++++func FilterStatements(grants []*Grant) ([]*Statement, error) {
++++ statements := map[*Statement]bool{}
++++ for _, grant := range grants {
++++ if grant.statement != nil {
++++ statements[grant.statement] = true
++++ }
++++ }
++++ retStatements := make([]*Statement, len(statements))
++++ var i int
++++ for statement := range statements {
++++ retStatements[i] = statement
++++ i++
++++ }
++++ return retStatements, nil
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package trustgraph
++++
++++import (
++++ "bytes"
++++ "crypto/x509"
++++ "encoding/json"
++++ "testing"
++++ "time"
++++
++++ "github.com/docker/libtrust"
++++ "github.com/docker/libtrust/testutil"
++++)
++++
// testStatementExpiration is how far in the future statements generated
// by these tests expire.
const testStatementExpiration = time.Hour * 5
++++
++++func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
++++ var statement Statement
++++
++++ statement.Grants = make([]*jsonGrant, len(grants))
++++ for i, grant := range grants {
++++ statement.Grants[i] = &jsonGrant{
++++ Subject: grant.Subject,
++++ Permission: grant.Permission,
++++ Grantee: grant.Grantee,
++++ }
++++ }
++++ statement.IssuedAt = time.Now()
++++ statement.Expiration = time.Now().Add(testStatementExpiration)
++++ statement.Revocations = make([]*jsonRevocation, 0)
++++
++++ marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ")
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ sig, err := libtrust.NewJSONSignature(marshalled)
++++ if err != nil {
++++ return nil, err
++++ }
++++ err = sig.SignWithChain(key, chain)
++++ if err != nil {
++++ return nil, err
++++ }
++++ statement.signature = sig
++++
++++ return &statement, nil
++++}
++++
// generateTrustChain builds a certificate chain of length chainLen
// rooted at a freshly generated CA: chain[chainLen-1] down to chain[1]
// are intermediates, and chain[0] is the leaf trust cert signed by the
// last intermediate. It returns the leaf's private key, a pool holding
// only the CA, and the chain. Any generation failure fails the test.
func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) {
	caKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("Error generating key: %s", err)
	}
	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
	if err != nil {
		t.Fatalf("Error generating ca: %s", err)
	}

	parent := ca
	parentKey := caKey
	chain := make([]*x509.Certificate, chainLen)
	// Work from the CA downwards, each intermediate signed by its parent.
	for i := chainLen - 1; i > 0; i-- {
		intermediatekey, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			t.Fatalf("Error generate key: %s", err)
		}
		chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
		if err != nil {
			t.Fatalf("Error generating intermdiate certificate: %s", err)
		}
		parent = chain[i]
		parentKey = intermediatekey
	}
	// Leaf trust certificate, signed by the innermost intermediate.
	trustKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("Error generate key: %s", err)
	}
	chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
	if err != nil {
		t.Fatalf("Error generate trust cert: %s", err)
	}

	caPool := x509.NewCertPool()
	caPool.AddCert(ca)

	return trustKey, caPool, chain
}
++++
// TestLoadStatement round-trips a signed statement through
// Bytes/LoadStatement and checks verification against the trusted pool,
// an empty pool, no pool, and corrupted input.
func TestLoadStatement(t *testing.T) {
	grantCount := 4
	grants, _ := createTestKeysAndGrants(grantCount)

	trustKey, caPool, chain := generateTrustChain(t, 6)

	statement, err := generateStatement(grants, trustKey, chain)
	if err != nil {
		t.Fatalf("Error generating statement: %s", err)
	}

	statementBytes, err := statement.Bytes()
	if err != nil {
		t.Fatalf("Error getting statement bytes: %s", err)
	}

	// Loading against the pool containing the CA must succeed.
	s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool)
	if err != nil {
		t.Fatalf("Error loading statement: %s", err)
	}
	if len(s2.Grants) != grantCount {
		t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
	}

	// An empty pool must fail chain verification with UnknownAuthorityError.
	pool := x509.NewCertPool()
	_, err = LoadStatement(bytes.NewReader(statementBytes), pool)
	if err == nil {
		t.Fatalf("No error thrown verifying without an authority")
	} else if _, ok := err.(x509.UnknownAuthorityError); !ok {
		t.Fatalf("Unexpected error verifying without authority: %s", err)
	}

	// A nil pool skips chain verification but still checks the signature.
	s2, err = LoadStatement(bytes.NewReader(statementBytes), nil)
	if err != nil {
		t.Fatalf("Error loading statement: %s", err)
	}
	if len(s2.Grants) != grantCount {
		t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
	}

	// Breaking the JSON structure must fail parsing.
	badData := make([]byte, len(statementBytes))
	copy(badData, statementBytes)
	badData[0] = '['
	_, err = LoadStatement(bytes.NewReader(badData), nil)
	if err == nil {
		t.Fatalf("No error thrown parsing bad json")
	}

	// Altering a payload byte must invalidate the signature.
	alteredData := make([]byte, len(statementBytes))
	copy(alteredData, statementBytes)
	alteredData[30] = '0'
	_, err = LoadStatement(bytes.NewReader(alteredData), nil)
	if err == nil {
		t.Fatalf("No error thrown from bad data")
	}
}
++++
++++func TestCollapseGrants(t *testing.T) {
++++ grantCount := 8
++++ grants, keys := createTestKeysAndGrants(grantCount)
++++ linkGrants := make([]*Grant, 4)
++++ linkGrants[0] = &Grant{
++++ Subject: "/user-3",
++++ Permission: 0x0f,
++++ Grantee: "/user-2",
++++ }
++++ linkGrants[1] = &Grant{
++++ Subject: "/user-3/sub-project",
++++ Permission: 0x0f,
++++ Grantee: "/user-4",
++++ }
++++ linkGrants[2] = &Grant{
++++ Subject: "/user-6",
++++ Permission: 0x0f,
++++ Grantee: "/user-7",
++++ }
++++ linkGrants[3] = &Grant{
++++ Subject: "/user-6/sub-project/specific-app",
++++ Permission: 0x0f,
++++ Grantee: "/user-5",
++++ }
++++ trustKey, pool, chain := generateTrustChain(t, 3)
++++
++++ statements := make([]*Statement, 3)
++++ var err error
++++ statements[0], err = generateStatement(grants[0:4], trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ statements[1], err = generateStatement(grants[4:], trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ statements[2], err = generateStatement(linkGrants, trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++
++++ statementsCopy := make([]*Statement, len(statements))
++++ for i, statement := range statements {
++++ b, err := statement.Bytes()
++++ if err != nil {
++++ t.Fatalf("Error getting statement bytes: %s", err)
++++ }
++++ verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool)
++++ if err != nil {
++++ t.Fatalf("Error loading statement: %s", err)
++++ }
++++ // Force sort by reversing order
++++ statementsCopy[len(statementsCopy)-i-1] = verifiedStatement
++++ }
++++ statements = statementsCopy
++++
++++ collapsedGrants, expiration, err := CollapseStatements(statements, false)
++++ if len(collapsedGrants) != 12 {
++++ t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants))
++++ }
++++ if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
++++ t.Fatalf("Unexpected expiration time: %s", expiration.String())
++++ }
++++ g := NewMemoryGraph(collapsedGrants)
++++
++++ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
++++ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
++++ testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f)
++++ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f)
++++ testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f)
++++ testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f)
++++ testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f)
++++ testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f)
++++ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
++++ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f)
++++ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f)
++++ testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f)
++++ testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f)
++++ testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f)
++++
++++ testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f)
++++ testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f)
++++ testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f)
++++
++++ // Add revocation grant
++++ statements = append(statements, &Statement{
++++ jsonStatement{
++++ IssuedAt: time.Now(),
++++ Expiration: time.Now().Add(testStatementExpiration),
++++ Grants: []*jsonGrant{},
++++ Revocations: []*jsonRevocation{
++++ &jsonRevocation{
++++ Subject: "/user-1",
++++ Revocation: 0x0f,
++++ Grantee: keys[0].KeyID(),
++++ },
++++ &jsonRevocation{
++++ Subject: "/user-2",
++++ Revocation: 0x08,
++++ Grantee: keys[1].KeyID(),
++++ },
++++ &jsonRevocation{
++++ Subject: "/user-6",
++++ Revocation: 0x0f,
++++ Grantee: "/user-7",
++++ },
++++ &jsonRevocation{
++++ Subject: "/user-9",
++++ Revocation: 0x0f,
++++ Grantee: "/user-10",
++++ },
++++ },
++++ },
++++ nil,
++++ })
++++
++++ collapsedGrants, expiration, err = CollapseStatements(statements, false)
++++ if len(collapsedGrants) != 12 {
++++ t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants))
++++ }
++++ if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
++++ t.Fatalf("Unexpected expiration time: %s", expiration.String())
++++ }
++++ g = NewMemoryGraph(collapsedGrants)
++++
++++ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
++++ testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
++++ testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f)
++++
++++ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07)
++++}
++++
++++func TestFilterStatements(t *testing.T) {
++++ grantCount := 8
++++ grants, keys := createTestKeysAndGrants(grantCount)
++++ linkGrants := make([]*Grant, 3)
++++ linkGrants[0] = &Grant{
++++ Subject: "/user-3",
++++ Permission: 0x0f,
++++ Grantee: "/user-2",
++++ }
++++ linkGrants[1] = &Grant{
++++ Subject: "/user-5",
++++ Permission: 0x0f,
++++ Grantee: "/user-4",
++++ }
++++ linkGrants[2] = &Grant{
++++ Subject: "/user-7",
++++ Permission: 0x0f,
++++ Grantee: "/user-6",
++++ }
++++
++++ trustKey, _, chain := generateTrustChain(t, 3)
++++
++++ statements := make([]*Statement, 5)
++++ var err error
++++ statements[0], err = generateStatement(grants[0:2], trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ statements[1], err = generateStatement(grants[2:4], trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ statements[2], err = generateStatement(grants[4:6], trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ statements[3], err = generateStatement(grants[6:], trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ statements[4], err = generateStatement(linkGrants, trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error generating statement: %s", err)
++++ }
++++ collapsed, _, err := CollapseStatements(statements, false)
++++ if err != nil {
++++ t.Fatalf("Error collapsing grants: %s", err)
++++ }
++++
++++ // Filter 1, all 5 statements
++++ filter1, err := FilterStatements(collapsed)
++++ if err != nil {
++++ t.Fatalf("Error filtering statements: %s", err)
++++ }
++++ if len(filter1) != 5 {
++++ t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1))
++++ }
++++
++++ // Filter 2, one statement
++++ filter2, err := FilterStatements([]*Grant{collapsed[0]})
++++ if err != nil {
++++ t.Fatalf("Error filtering statements: %s", err)
++++ }
++++ if len(filter2) != 1 {
++++ t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2))
++++ }
++++
++++ // Filter 3, 2 statements, from graph lookup
++++ g := NewMemoryGraph(collapsed)
++++ lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f)
++++ if err != nil {
++++ t.Fatalf("Error looking up grants: %s", err)
++++ }
++++ if len(lookupGrants) != 1 {
++++ t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants))
++++ }
++++ if len(lookupGrants[0]) != 2 {
++++ t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants))
++++ }
++++ filter3, err := FilterStatements(lookupGrants[0])
++++ if err != nil {
++++ t.Fatalf("Error filtering statements: %s", err)
++++ }
++++ if len(filter3) != 2 {
++++ t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3))
++++ }
++++
++++}
++++
++++func TestCreateStatement(t *testing.T) {
++++ grantJSON := bytes.NewReader([]byte(`[
++++ {
++++ "subject": "/user-2",
++++ "permission": 15,
++++ "grantee": "/user-1"
++++ },
++++ {
++++ "subject": "/user-7",
++++ "permission": 1,
++++ "grantee": "/user-9"
++++ },
++++ {
++++ "subject": "/user-3",
++++ "permission": 15,
++++ "grantee": "/user-2"
++++ }
++++]`))
++++ revocationJSON := bytes.NewReader([]byte(`[
++++ {
++++ "subject": "user-8",
++++ "revocation": 12,
++++ "grantee": "user-9"
++++ }
++++]`))
++++
++++ trustKey, pool, chain := generateTrustChain(t, 3)
++++
++++ statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain)
++++ if err != nil {
++++ t.Fatalf("Error creating statement: %s", err)
++++ }
++++
++++ b, err := statement.Bytes()
++++ if err != nil {
++++ t.Fatalf("Error retrieving bytes: %s", err)
++++ }
++++
++++ verified, err := LoadStatement(bytes.NewReader(b), pool)
++++ if err != nil {
++++ t.Fatalf("Error loading statement: %s", err)
++++ }
++++
++++ if len(verified.Grants) != 3 {
++++ t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants))
++++ }
++++
++++ if len(verified.Revocations) != 1 {
++++ t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations))
++++ }
++++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++++package libtrust
++++
++++import (
++++ "bytes"
++++ "crypto/elliptic"
++++ "crypto/x509"
++++ "encoding/base32"
++++ "encoding/base64"
++++ "encoding/binary"
++++ "encoding/pem"
++++ "errors"
++++ "fmt"
++++ "math/big"
++++ "strings"
++++)
++++
// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
	// RawURLEncoding is exactly the unpadded base64url alphabet, so no
	// manual trimming of '=' padding is required.
	return base64.RawURLEncoding.EncodeToString(b)
}
++++
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
	// A valid unpadded base64 string has length 0, 2 or 3 (mod 4);
	// a remainder of 1 can never occur in well-formed input.
	if rem := len(s) % 4; rem != 0 {
		if rem == 1 {
			return nil, errors.New("illegal base64url string")
		}
		s += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(s)
}
++++
// keyIDEncode formats b as an unpadded base32 string with a ':' inserted
// after every full group of four characters; the final segment keeps any
// leftover characters attached (so it may be four to seven characters long).
func keyIDEncode(b []byte) string {
	s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
	var segments []string
	// Peel off leading 4-character groups while at least 8 characters
	// remain, so the trailing remainder stays fused to the last group.
	for len(s) >= 8 {
		segments = append(segments, s[:4])
		s = s[4:]
	}
	segments = append(segments, s)
	return strings.Join(segments, ":")
}
++++
// stringFromMap extracts the string stored under key in m, removing the
// entry from the map on success. It returns an error when the key is absent
// or the value is not a string (in which case m is left untouched).
func stringFromMap(m map[string]interface{}, key string) (string, error) {
	raw, present := m[key]
	if !present {
		return "", fmt.Errorf("%q value not specified", key)
	}

	str, isString := raw.(string)
	if !isString {
		return "", fmt.Errorf("%q value must be a string", key)
	}
	// Consume the entry so callers can detect leftover/unknown fields.
	delete(m, key)

	return str, nil
}
++++
++++func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
++++ curveByteLen := (curve.Params().BitSize + 7) >> 3
++++
++++ cBytes, err := joseBase64UrlDecode(cB64Url)
++++ if err != nil {
++++ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
++++ }
++++ cByteLength := len(cBytes)
++++ if cByteLength != curveByteLen {
++++ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
++++ }
++++ return new(big.Int).SetBytes(cBytes), nil
++++}
++++
++++func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
++++ dBytes, err := joseBase64UrlDecode(dB64Url)
++++ if err != nil {
++++ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
++++ }
++++
++++ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
++++ // octets (where n is the order of the curve). This is because the private
++++ // key d must be in the interval [1, n-1] so the bitlength of d should be
++++ // no larger than the bitlength of n-1. The easiest way to find the octet
++++ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
++++ // bit sequence right by 3, which is essentially dividing by 8 and adding
++++ // 1 if there is any remainder. Thus, the private key value d should be
++++ // output to (bitlength(n-1)+7)>>3 octets.
++++ n := curve.Params().N
++++ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
++++ dByteLength := len(dBytes)
++++
++++ if dByteLength != octetLength {
++++ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
++++ }
++++
++++ return new(big.Int).SetBytes(dBytes), nil
++++}
++++
++++func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
++++ nBytes, err := joseBase64UrlDecode(nB64Url)
++++ if err != nil {
++++ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
++++ }
++++
++++ return new(big.Int).SetBytes(nBytes), nil
++++}
++++
// serializeRSAPublicExponentParam serializes the RSA public exponent E as
// big-endian octets with leading zeros stripped.
// We MUST use the minimum number of octets to represent E.
// E is supposed to be 65537 for performance and security reasons
// and is what golang's rsa package generates, but it might be
// different if imported from some other generator.
func serializeRSAPublicExponentParam(e int) []byte {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(e))
	// Strip leading zero octets but always keep at least one, so e == 0
	// yields a single 0x00 byte. The previous loop bound of 8 ran past the
	// end of the 4-byte buffer and panicked for e == 0.
	i := 0
	for i < len(buf)-1 && buf[i] == 0 {
		i++
	}
	return buf[i:]
}
++++
++++func parseRSAPublicExponentParam(eB64Url string) (int, error) {
++++ eBytes, err := joseBase64UrlDecode(eB64Url)
++++ if err != nil {
++++ return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
++++ }
++++ // Only the minimum number of bytes were used to represent E, but
++++ // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
++++ // to add zero padding if necassary.
++++ byteLen := len(eBytes)
++++ buf := make([]byte, 4-byteLen, 4)
++++ eBytes = append(buf, eBytes...)
++++
++++ return int(binary.BigEndian.Uint32(eBytes)), nil
++++}
++++
++++func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
++++ b64Url, err := stringFromMap(m, key)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ paramBytes, err := joseBase64UrlDecode(b64Url)
++++ if err != nil {
++++ return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
++++ }
++++
++++ return new(big.Int).SetBytes(paramBytes), nil
++++}
++++
// createPemBlock packages derBytes into a pem.Block of the given type name,
// flattening the extended-field headers into PEM header strings. A "hosts"
// []string value is joined with commas; any other non-string value is
// rejected with an error rather than silently dropped (the previous code
// ignored such values despite "Return error" TODO comments).
func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
	for k, v := range headers {
		switch val := v.(type) {
		case string:
			pemBlock.Headers[k] = val
		case []string:
			if k == "hosts" {
				pemBlock.Headers[k] = strings.Join(val, ",")
			} else {
				return nil, fmt.Errorf("unable to encode %q header value of type %T", k, v)
			}
		default:
			return nil, fmt.Errorf("unable to encode %q header value of type %T", k, v)
		}
	}

	return pemBlock, nil
}
++++
++++func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
++++ cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
++++ if err != nil {
++++ return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
++++ }
++++
++++ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
++++ if err != nil {
++++ return nil, err
++++ }
++++
++++ addPEMHeadersToKey(pemBlock, pubKey)
++++
++++ return pubKey, nil
++++}
++++
++++func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
++++ for key, value := range pemBlock.Headers {
++++ var safeVal interface{}
++++ if key == "hosts" {
++++ safeVal = strings.Split(value, ",")
++++ } else {
++++ safeVal = value
++++ }
++++ pubKey.AddExtendedField(key, safeVal)
++++ }
++++}