return chrootarchive.UntarWithRoot(src, dst, opts, root)
}
-func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath(src, opts)
}
- return archive.TarWithOptions(src, opts)
+ return chrootarchive.Tar(src, opts, root)
}
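chrootarchive.Tar is the new entry point this hunk switches to; it is defined elsewhere in the patch, but a minimal sketch of the assumed wrapper (mirroring the existing Untar/UntarWithRoot helpers and delegating to the invokePack added below) looks roughly like:

```go
// Sketch of the assumed chrootarchive.Tar wrapper; the real definition lives
// in pkg/chrootarchive and may differ in details.
func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
	if options == nil {
		options = &archive.TarOptions{}
	}
	// invokePack re-execs docker-tar chrooted to root (see below).
	return invokePack(srcPath, options, root)
}
```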
// ContainerCopy performs a deprecated operation of archiving the resource at
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
- data, err := archivePath(driver, sourceDir, opts)
+ data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
if err != nil {
return nil, nil, err
}
archive, err := archivePath(driver, basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
- })
+ }, container.BaseFS.Path())
if err != nil {
return nil, err
}
"os"
"path/filepath"
"runtime"
+ "strings"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
+ "github.com/pkg/errors"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
runtime.LockOSThread()
flag.Parse()
- var options *archive.TarOptions
+ var options archive.TarOptions
// read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
- if err := archive.Unpack(os.Stdin, dst, options); err != nil {
+ if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
}
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ if root == "" {
+ return errors.New("must specify a root to chroot to")
+ }
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
}
return nil
}
+
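+// tar is the entry-point for docker-tar on re-exec. It chroots to the
+// requested root, reads TarOptions from stdin, packs the source path, and
+// streams the resulting archive to stdout for the parent process to consume.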
+func tar() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ src := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = src
+ }
+
+ if err := realChroot(root); err != nil {
+ fatal(err)
+ }
+
+ var options archive.TarOptions
+ if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ rdr, err := archive.TarWithOptions(src, &options)
+ if err != nil {
+ fatal(err)
+ }
+ defer rdr.Close()
+
+ if _, err := io.Copy(os.Stdout, rdr); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
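tar relies on realChroot, which already exists in this package's chroot_*.go files and is not part of this diff; on Linux it is roughly the following (imports of fmt and golang.org/x/sys/unix assumed):

```go
// Approximate shape of the existing helper; shown for context only.
func realChroot(path string) error {
	if err := unix.Chroot(path); err != nil {
		return fmt.Errorf("error chrooting to %q: %v", path, err)
	}
	// Reset the working directory so relative paths resolve inside the new root.
	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("error changing to new root: %v", err)
	}
	return nil
}
```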
+
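+// invokePack re-execs the current binary as docker-tar so the pack runs
+// chrooted to root. TarOptions are sent to the child over stdin, and the
+// child's stdout is handed back to the caller as the archive stream.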
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if root == "" {
+ return nil, errors.New("root path must not be empty")
+ }
+
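+ // The child chroots to root before packing, so the source path has to be
+ // rewritten relative to root (as an absolute path inside the new root).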
+ relSrc, err := filepath.Rel(root, srcPath)
+ if err != nil {
+ return nil, err
+ }
+ if relSrc == "." {
+ relSrc = "/"
+ }
+ if relSrc[0] != '/' {
+ relSrc = "/" + relSrc
+ }
+
+ // make sure we didn't trim a trailing slash with the call to `Rel`
+ if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+ relSrc += "/"
+ }
+
+ cmd := reexec.Command("docker-tar", relSrc, root)
+
+ errBuff := bytes.NewBuffer(nil)
+ cmd.Stderr = errBuff
+
+ tarR, tarW := io.Pipe()
+ cmd.Stdout = tarW
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ }
+
+ if err := cmd.Start(); err != nil {
+ return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ }
+
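+ // Close the write side once the child exits so the reader sees either EOF
+ // or the child's error, with its captured stderr attached for context.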
+ go func() {
+ err := cmd.Wait()
+ err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ tarW.CloseWithError(err)
+ }()
+
+ if err := json.NewEncoder(stdin).Encode(options); err != nil {
+ stdin.Close()
+ return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ }
+ stdin.Close()
+
+ return tarR, nil
+}
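reexec.Command("docker-tar", ...) only works if the new entry point is registered with the reexec package at init time. The registration is not shown in this hunk; presumably it follows the existing docker-untar pattern:

```go
// Assumed registration, mirroring the existing docker-untar handler; the
// actual init() lives elsewhere in pkg/chrootarchive.
func init() {
	reexec.Register("docker-untar", untar)
	reexec.Register("docker-tar", tar)
}
```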
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ // Windows is different to Linux here because Windows does not support
+ // chroot, so there is no point sandboxing a chrooted process to do the
+ // pack; we call archive.TarWithOptions inline within the daemon process
+ // instead.
+ return archive.TarWithOptions(srcPath, options)
+}
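Callers on both platforms go through the same chrootarchive.Tar signature. A hypothetical, self-contained caller (the rootfs path is illustrative, and the chroot requires root privileges on Linux) might look like:

```go
package main

import (
	"io"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// Re-exec children (docker-tar / docker-untar) re-enter through reexec.Init.
	if reexec.Init() {
		return
	}

	// Hypothetical container root filesystem; any directory works for a smoke test.
	rootfs := "/var/lib/docker/overlay2/abc123/merged"

	// Pack rootfs/etc while the packing process is chrooted to rootfs, so
	// symlinks inside the source cannot escape the container's filesystem.
	rdr, err := chrootarchive.Tar(filepath.Join(rootfs, "etc"), &archive.TarOptions{
		Compression: archive.Uncompressed,
	}, rootfs)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	// Stream the archive to stdout; a real caller would write it to the API response.
	if _, err := io.Copy(os.Stdout, rdr); err != nil {
		panic(err)
	}
}
```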